{ "source": "jennyxue1997/MITRedbullBackend", "score": 3 }
#### File: jennyxue1997/MITRedbullBackend/champ_select.py

```python
from google.cloud import bigquery
from google.oauth2 import service_account
import json
import numpy as np

CREDENTIALS = service_account.Credentials.from_service_account_file(
    "/Users/jennyxue/Desktop/6S198/redbull_creds.json")
PROJECT_ID = "spurs-sp2018"


def get_best_champ_select(request):
    """
    Parses the request and returns the list of champs that should be selected next.
    """
    payload = json.loads(request.data)
    team = payload["team"]
    enemy = payload["enemy"]
    preferred = payload.get("preferred", None)
    bans = payload.get("bans", None)
    num = payload.get("num", None)
    return get_pick_given_selections_by_winrate(team, enemy, preferred=preferred, bans=bans)[:num]


def get_pick_given_selections_by_winrate(team, opponent, preferred=None, bans=None):
    """
    Compiles and runs the SQL query
    """
    client = bigquery.Client(credentials=CREDENTIALS, project=PROJECT_ID)
    query = "SELECT * FROM `spurs-sp2018.league_of_legends.PositionalMatchups` "
    games_query = "ORDER BY WinRate ASC, Games ASC LIMIT {}".format(500)
    champ_query = get_champ_query(team, opponent)
    ban_query = get_ban_query(bans, team, opponent)
    if champ_query != "" and ban_query != "":
        champ_query += " AND "
    if champ_query != "" or ban_query != "":
        query += "WHERE "
    query = query + champ_query + ban_query + games_query
    print(query)
    # if champ_query == "" and ban_query == "":
    #     return [("Aurelion Sol", "MIDDLE", 0.5402390438247012),
    #             ("Heimerdinger", "ADC", 0.5385117493472585),
    #             ("Shaco", "JUNGLE", 0.5328070232908573),
    #             ("Viktor", "TOP", 0.5205676676264912),
    #             ("Sona", "SUPPORT", 0.5189737286833614)]
    query_job = client.query(query)
    results = query_job.result().to_dataframe().to_dict("records")
    return get_champions_for_position_given_opp_by_winrate(results, preferred, bans)


def get_champ_query(team, opponent):
    """
    Helper function for champ query
    """
    champ_query = "("
    for r in team.keys():
        if opponent[r] != "" and team[r] == "":
            champ_query += """(Champ="{}" AND Position="{}" AND GAMES > 60) OR """.format(opponent[r], r)
    if champ_query != "(":
        champ_query = champ_query[:-4] + ")"
    else:
        champ_query = ""
    return champ_query


def get_ban_query(bans, team, enemy):
    """
    Helper function for ban query
    """
    ban_query = "("
    if bans != None:
        for ban in bans:
            ban_query += """(Matchup!="{}") AND """.format(ban)
    for t in list(team.values()):
        if t != "":
            ban_query += """(Matchup!="{}") AND """.format(t)
    for t in list(enemy.values()):
        if t != "":
            ban_query += """(Matchup!="{}") AND """.format(t)
    if ban_query != "(":
        ban_query = ban_query[:-5] + ")"
    else:
        ban_query = ""
    return ban_query


def get_champions_for_position_given_opp_by_winrate(results, preferred, bans):
    """
    Helper function for ban query
    """
    # find enemy matchup with lowest winrate
    possible_champs = []
    for matchup in results:
        matchup_champ = matchup["Matchup"]
        win_rate = round((0.5 - matchup["WinRate"]) * 100, 2)
        position = matchup["Position"]
        # only considers matchups with greater than 100 games, in preferred, and not in bans
        # if(preferred == None or (preferred != None and matchup_champ in preferred)) and (bans == None or (bans != None and matchup_champ not in bans)):
        if position == "ADCSUPPORT" or position == "DUO_SUPPORT":
            position = "SUPPORT"
        if position == "DUO_CARRY":
            position = "ADC"
        if position != "SYNERGY":
            possible_champs.append((matchup_champ, win_rate, position))
    possible_champs.sort(key=lambda x: x[1])
    return possible_champs[::-1]


# team = {"TOP": "", "JUNGLE": "", "MIDDLE": "", "DUO_CARRY": "Tristana", "DUO_SUPPORT": "Leona"}
# enemy = {"TOP": "", "JUNGLE": "", "MIDDLE": "Zed", "DUO_CARRY": "", "DUO_SUPPORT": "Nami"}
# bans = ["Gallio", "Shyvana"]
# print(get_pick_given_selections_by_winrate(team, enemy, bans=bans))
```
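For context, a minimal sketch of how `get_best_champ_select` could be exercised. The payload shape (`team`, `enemy`, and the optional `preferred`, `bans`, `num` keys) and the role names come from the parsing code and the commented-out test data above; the `make_request` wrapper is a hypothetical stand-in for the framework request object, of which only `.data` is read.

```python
import json
from types import SimpleNamespace

def make_request(payload):
    # Hypothetical stand-in: get_best_champ_select only reads request.data.
    return SimpleNamespace(data=json.dumps(payload))

payload = {
    "team": {"TOP": "", "JUNGLE": "", "MIDDLE": "", "DUO_CARRY": "Tristana", "DUO_SUPPORT": "Leona"},
    "enemy": {"TOP": "", "JUNGLE": "", "MIDDLE": "Zed", "DUO_CARRY": "", "DUO_SUPPORT": "Nami"},
    "bans": ["Galio", "Shyvana"],  # champions excluded from the suggestions
    "num": 5,                      # keep only the top 5 suggestions
}

# Requires live BigQuery credentials; returns up to `num`
# (champion, winrate_delta, position) tuples, best matchups first.
# suggestions = get_best_champ_select(make_request(payload))
```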
{ "source": "jennyxue1997/seefoodbackend", "score": 2 }
#### File: seefoodbackend/app/recommend.py

```python
from google.cloud import bigquery
from google.oauth2 import service_account
from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import service_pb2
from werkzeug.utils import secure_filename
import json
import pandas as pd
import math

CREDENTIALS = service_account.Credentials.from_service_account_file(
    "SeeFood_Credentials.json")
PROJECT_ID = "seefood-224203"
UPLOAD_FOLDER = 'img'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

RECOMMENDATIONS = {
    400: ["Tortellini, pasta with cheese filling, fresh-refrigerated, as purchased",
          "Beef stew, canned entree",
          "Pork, fresh, spareribs, separable lean and fat, cooked, braised"],
    300: ["Potatoes, baked, skin, with salt",
          "Spinach souffle",
          "Potato salad, home-prepared"],
    200: ["Sweet potato, cooked, candied, home-prepared",
          "CAMPBELL Soup Company, CAMPBELL'S Red and White, Bean with Bacon Soup, condensed",
          "Avocados, raw, California"],
    100: ["Nuts, chestnuts, japanese, boiled and steamed",
          "Yogurt, vanilla, low fat, 11 grams protein per 8 ounce, fortified with vitamin D"]}

FOOD_DICT = {
    "Tortellini, pasta with cheese filling, fresh-refrigerated, as purchased": "Cheese Tortellini",
    "Beef stew, canned entree": "Beef Stew",
    "Pork, fresh, spareribs, separable lean and fat, cooked, braised": "Braised Spareribs",
    "Potatoes, baked, skin, with salt": "Baked Potatoes",
    "Spinach souffle": "Spinach souffle",
    "Potato salad, home-prepared": "Potato Salad",
    "Sweet potato, cooked, candied, home-prepared": "Sweet Potato",
    "CAMPBELL Soup Company, CAMPBELL'S Red and White, Bean with Bacon Soup, condensed": "Bean & Bacon Soup",
    "Nuts, chestnuts, japanese, boiled and steamed": "Chestnuts",
    "Avocados, raw, California": "Avocado",
    "Yogurt, vanilla, low fat, 11 grams protein per 8 ounce, fortified with vitamin D": "Vanilla Yogurt"
}


def post_recommendations(request, client):
    # name = request.form["name"]
    calories_needed = int(request.form["calories_needed"])
    rounded_calories = max(min(int(math.ceil(calories_needed / 100.0)) * 100, 400), 100)
    recommended_food = RECOMMENDATIONS[rounded_calories]
    food = get_food_under_calories(recommended_food, client)
    return food


def get_food_under_calories(recommended_food, client):
    food_query = ""
    for food in recommended_food:
        food_query += "name='{}'".format(food) + " OR "
    food_query = food_query.strip("OR ")
    query = "SELECT * FROM `{}.seefood.Nutrition` WHERE ({}) ".format(PROJECT_ID, food_query)
    query_job = client.query(query)
    results = query_job.result().to_dataframe().to_dict("records")
    ans = []
    for result in results:
        result["name"] = FOOD_DICT[result["name"]]
        ans.append(result)
    return ans
```
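A quick worked check of the calorie bucketing in `post_recommendations`: the requested calories are rounded up to the nearest hundred and clamped to the 100–400 range, so every request maps onto one of the four `RECOMMENDATIONS` keys. The inputs below are just illustrative values.

```python
import math

def bucket(calories_needed):
    # Same expression as in post_recommendations: round up to the nearest 100, clamp to [100, 400].
    return max(min(int(math.ceil(calories_needed / 100.0)) * 100, 400), 100)

assert bucket(40) == 100    # very small requests still get the 100-calorie list
assert bucket(230) == 300   # 230 rounds up to 300
assert bucket(900) == 400   # anything above 400 is capped at the 400-calorie list
```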
{ "source": "jennyxue97/Digest", "score": 3 }
#### File: jennyxue97/Digest/computer_vision.py

```python
import requests
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
import ssl

ssl._create_default_https_context = ssl._create_unverified_context

# These names are used below but were not defined in the file (the keys were
# presumably kept out of the repository); supply your own Azure keys here.
subscription_key = "<azure-computer-vision-key>"
subscriptionKey = "<azure-translator-key>"


def parse_text_from_image(image_url):
    ocr_url = "https://eastus.api.cognitive.microsoft.com/vision/v2.0/ocr"
    headers = {'Ocp-Apim-Subscription-Key': subscription_key,
               "Content-Type": 'application/octet-stream'}
    params = {'detectOrientation': 'true', 'language': 'zh-Hans'}
    with open(image_url, 'rb') as f:
        data = f.read()
    response = requests.post(ocr_url, headers=headers, params=params, data=data)
    response.raise_for_status()
    analysis = response.json()
    items = []
    for region in analysis["regions"]:
        for line in region["lines"]:
            item = ""
            words = line["words"]
            for word in words:
                try:
                    int(word["text"])
                except:
                    if word["text"] != "元":
                        item += (word["text"])
            if item != "":
                items.append(item)
    return get_images_of_food(items)


def get_images_of_food(items):
    imgs = []
    for item in items:
        img_dict = {}
        url = 'https://www.douguo.com/search/recipe/' + urllib.parse.quote(item.encode("utf-8"))
        response = urllib.request.urlopen(url)
        html = response.read()
        soup = BeautifulSoup(html, 'html.parser')
        attributes = soup.find("a", class_="cook-img").attrs
        img = attributes["style"].split()[1][4:-1]
        img_dict["item"] = item
        img_dict["img"] = img
        attributes = soup.find("a", class_="cook-img").attrs
        food_id = attributes["href"].split("/")[2]
        img_dict["food_id"] = food_id
        imgs.append(img_dict)
    return imgs


def get_ingredients(food_id, language):
    url = "https://www.douguo.com/cookbook/" + food_id
    response = urllib.request.urlopen(url)
    html = response.read()
    soup = BeautifulSoup(html, 'html.parser')
    ingredients = soup.findAll("span", class_="scname")
    return translate_ingredients(list(map(lambda x: x.text, ingredients)), language)


def translate_ingredients(ingredients, language):
    base_url = 'https://api.cognitive.microsofttranslator.com'
    path = '/translate?api-version=3.0'
    params = '&to=' + language
    constructed_url = base_url + path + params
    headers = {
        'Ocp-Apim-Subscription-Key': subscriptionKey,
        'Content-type': 'application/json'
    }
    body = [{'text': ",".join(ingredients)}]
    request = requests.post(constructed_url, headers=headers, json=body)
    response = request.json()[0]["translations"][0]["text"]
    return response


items = parse_text_from_image("public/menu4.png")
print(items)
# print(get_images_of_food(items))
```
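To make the OCR parsing step concrete, here is a small sketch of the region/line/word structure that `parse_text_from_image` walks. The shape mirrors the loop above; the menu line itself is made up. Digits and the currency character 元 are skipped, so only dish names survive.

```python
# Hypothetical, trimmed-down OCR response for one menu line: "宫保鸡丁 28 元"
analysis = {
    "regions": [
        {"lines": [
            {"words": [{"text": "宫保鸡丁"}, {"text": "28"}, {"text": "元"}]}
        ]}
    ]
}

items = []
for region in analysis["regions"]:
    for line in region["lines"]:
        item = ""
        for word in line["words"]:
            try:
                int(word["text"])          # numeric tokens (prices) are dropped
            except ValueError:
                if word["text"] != "元":   # the currency marker is dropped too
                    item += word["text"]
        if item != "":
            items.append(item)

print(items)  # ['宫保鸡丁']
```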
{ "source": "jennyxue97/PathfinderBackend", "score": 3 }
#### File: jennyxue97/PathfinderBackend/database.py ```python from google.cloud import bigquery from google.oauth2.service_account import Credentials import pandas as pd import numpy as np import pandas_gbq as pdg def post_itinerary(user_id, user_name, itinerary_name, itinerary_id, itinerary): columns = ["user_id", "user_name", "business_name", "url", "image_url", "review_count", "category_1", "category_2", "category_3", "rating", "location", "phone", "itinerary_id", "itinerary_name", "cluster_id", "query", "coordinates", "sort_order"] df = [] itinerary_id = itinerary_name itinerary_name = itinerary_name # client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") for cluster_id, cluster in enumerate(itinerary["clusters"]): for query in cluster: businesses = cluster[query] for order, business in enumerate(businesses): business_name = business["name"] url = business["url"] image_url = business["image_url"] review_count = business["review_count"] categories = [None, None, None] for idx, category in enumerate(business["categories"]): categories[idx] = category rating = business["rating"] location = business["location"] phone = business["phone"] coordinates = business["coordinates"] row = [user_id, user_name, business_name, url, image_url, review_count, categories[0], categories[1], categories[2], rating, location, phone, itinerary_id, itinerary_name, cluster_id, query, coordinates, order] df.append(row) dtypes = ["str", "str", "str", "str", "str", "int", "str", "str", "str", "float", "str", "str", "str", "str", "int", "str", "str", "int"] schemas = ["STRING", "STRING", "STRING", "STRING", "STRING", "INTEGER", "STRING", "STRING", "STRING", "FLOAT", "STRING", "STRING", "STRING", "STRING", "INTEGER", "STRING", "STRING", "INTEGER"] df = pd.DataFrame(df, columns=columns) table_schemas = [] for idx, schema in enumerate(schemas): table_schemas.append({"name": columns[idx], "type": schema}) df[columns[idx]] = df[columns[idx]].astype(dtypes[idx]) credentials = Credentials.from_service_account_file("Sherpa-3244e874fcf9.json") pdg.to_gbq(df, "Sherpa.Itineraries", project_id="sherpa-238315", chunksize=None, if_exists="append", credentials=credentials, table_schema=table_schemas) return {"done": True} def delete_itinerary(user_id, itinerary_id): client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") query = """ DELETE FROM `sherpa-238315.Sherpa.Itineraries` WHERE itinerary_id="%s" AND user_id="%s" """ %(itinerary_id, user_id) query_job = client.query(query) results = query_job.result() return results def get_itinerary(user_id, itinerary_id): client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") query = """ SELECT * FROM `sherpa-238315.Sherpa.Itineraries` WHERE itinerary_id='%s' AND user_id='%s' ORDER BY sort_order """%(itinerary_id, user_id) query_job = client.query(query) results = query_job.result() parsed_results = {"clusters": []} clusters = {} df = results.to_dataframe() for idx, row in df.iterrows(): results_keys = ["itinerary_id", "itinerary_name", "user_name", "user_id"] for results_key in results_keys: if results_key not in parsed_results: parsed_results[results_key] = row[results_key] if row["cluster_id"] not in clusters: clusters[row["cluster_id"]] = {} query = row["query"] if query not in clusters[row["cluster_id"]]: clusters[row["cluster_id"]][query] = [] categories = [] for i in range(1, 4): if row["category_" + str(i)] != "None": categories.append(row["category_" + str(i)]) query_dict = { "categories": categories, } 
columns = ["coordinates", "image_url", "url", "location", "phone", "rating", "review_count"] for column in columns: query_dict[column] = row[column] query_dict["name"] = row["business_name"] clusters[row["cluster_id"]][query].append(query_dict) for idx in sorted(clusters.keys()): parsed_results["clusters"].append(clusters[idx]) return parsed_results def post_itinerary_in_user(user_id, user_name, itinerary_name): itinerary_id = itinerary_name if is_duplicate_itinerary(user_id, itinerary_id, itinerary_name): return {"done": False, "itinerary_id": itinerary_id, "itinerary_name": itinerary_name} client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") query = """ INSERT INTO `sherpa-238315.Sherpa.Users` (user_id, user_name, itinerary_id, itinerary_name) VALUES ( "%s", "%s", "%s", "%s" ) """ %(user_id, user_name, itinerary_id, itinerary_name) query_job = client.query(query) results = query_job.result() return {"done": True, "itinerary_id": itinerary_id, "itinerary_name": itinerary_name} def is_duplicate_itinerary(user_id, itinerary_id, itinerary_name): client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") query = """ SELECT * FROM `sherpa-238315.Sherpa.Users` WHERE user_id="%s" AND ( itinerary_id="%s" OR itinerary_name="%s" ) """ %(user_id, itinerary_id, itinerary_name) query_job = client.query(query) results = query_job.result() df = results.to_dataframe() if df.shape[0] == 0: return False return True def get_itineraries_from_user(user_id): client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") query = """ SELECT * FROM `sherpa-238315.Sherpa.Users` WHERE user_id="%s" """ %(user_id) query_job = client.query(query) results = query_job.result() df = results.to_dataframe() users = {"itineraries": {}} for idx, row in df.iterrows(): if row["user_id"] not in users: users["user_id"] = row["user_id"] if row["user_name"] not in users: users["user_name"] = row["user_name"] users["itineraries"][row["itinerary_id"]] = row["itinerary_name"] return users def delete_itinerary_from_user(user_id, itinerary_id): delete_itinerary(user_id, itinerary_id) client = bigquery.Client.from_service_account_json("Sherpa-3244e874fcf9.json") query = """ DELETE FROM `sherpa-238315.Sherpa.Users` WHERE itinerary_id="%s" AND user_id="%s" """ %(itinerary_id, user_id) query_job = client.query(query) results = query_job.result() return {"done": True} ```
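For reference, a minimal sketch of the schema-building pattern used in `post_itinerary` above: each column name is paired with a BigQuery type for `pandas_gbq.to_gbq`, and the matching pandas dtype is applied to the DataFrame. The three-column frame here is a made-up stand-in for the full itinerary table.

```python
import pandas as pd

# Illustrative subset of the real columns/dtypes/schemas lists in post_itinerary.
columns = ["user_id", "review_count", "rating"]
dtypes = ["str", "int", "float"]
schemas = ["STRING", "INTEGER", "FLOAT"]

df = pd.DataFrame([["u1", 120, 4.5]], columns=columns)

table_schemas = []
for idx, schema in enumerate(schemas):
    table_schemas.append({"name": columns[idx], "type": schema})
    df[columns[idx]] = df[columns[idx]].astype(dtypes[idx])

# table_schemas is then passed as pandas_gbq.to_gbq(..., table_schema=table_schemas)
print(table_schemas)
```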
{ "source": "jennyxue97/SpeakEasyBackend", "score": 2 }
#### File: jennyxue97/SpeakEasyBackend/users.py

```python
import config_cosmos
import azure.cosmos.cosmos_client as cosmos_client


def post_user(user_name):
    collection_link = "dbs/speakeasy/colls/users"
    response_get_user = get_user(user_name)
    if response_get_user["user_exists"]:
        return response_get_user
    client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST,
                                        auth={'masterKey': config_cosmos.COSMOSDB_KEY})
    client.CreateItem(collection_link, {
        "user_name": user_name,
        "id": user_name
    })
    return {"user_exists": False, "user_name": user_name}


def get_user(user_name):
    collection_link = "dbs/speakeasy/colls/users"
    client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST,
                                        auth={'masterKey': config_cosmos.COSMOSDB_KEY})
    query = "SELECT * FROM users WHERE users.user_name='%s'" % user_name
    data = list(client.QueryItems(collection_link, query, config_cosmos.OPTIONS))
    if len(data) > 0:
        return {"user_exists": True}
    return {"user_exists": False}
```
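`config_cosmos` is imported above but not included in this snippet. A plausible minimal version, reconstructed only from the attributes the code reads (`COSMOSDB_HOST`, `COSMOSDB_KEY`, `OPTIONS`), might look like the sketch below; the host URL, key, and the cross-partition option are placeholders and assumptions, not the original file.

```python
# config_cosmos.py -- hypothetical reconstruction; only these three names are used by users.py.
COSMOSDB_HOST = "https://<your-account>.documents.azure.com:443/"
COSMOSDB_KEY = "<primary-key>"

# Query options passed straight through to CosmosClient.QueryItems.
OPTIONS = {"enableCrossPartitionQuery": True}
```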
{ "source": "jennyzdse/jennyzdse.github.io", "score": 3 }
#### File: python/douban/douban.py

```python
import urllib
import urllib2
import sys, re
import cookielib
import hashlib
import httplib
import StringIO

DEBUG = 1

def Connect2Web(addr):
    aResp = urllib2.urlopen(addr)
    web_pg = aResp.read()
    if DEBUG:
        print addr
        #print web_pg
    return web_pg

def getReviewList(url):
    reviews = []
    index = 0
    addr = url
    while True:
        web_pg = Connect2Web(addr)
        for line in StringIO.StringIO(web_pg):
            if ("http://book.douban.com/review/" in line):
                matchObj = re.search(r'a title="(.*)" href="(http://book.douban.com/review/[0-9]+/)', line)
                if matchObj:
                    if DEBUG:
                        print matchObj.group(1)
                        print matchObj.group(2)
                    reviews.append(matchObj.group(1))
                    reviews.append(matchObj.group(2))
        # next page
        index = index + 10
        addr = url + '?start=' + str(index)
        if addr not in web_pg:
            #print addr
            break
    return reviews

def fetchContent(fp, review):
    web_pg = Connect2Web(review)
    # date
    matchObj = re.search(r'class="mn">([0-9 \-:]+)</span>', web_pg)
    if matchObj:
        print "date", matchObj.group(1)
        fp.write('<span class="mn">'+matchObj.group(1)+'</span><br>\n')
    # from which book
    matchObj = re.search(r'(<a href="http://book.douban.com/subject/[0-9]+/"><span property="v:itemreviewed">.*</span></a>)', web_pg)
    if matchObj:
        print matchObj.group(1)
        fp.write(matchObj.group(1)+'<br>\n')
    # the content
    matchObj = re.search(r'<span property="v:description" class="">(.*)<div class="clear"></div></span>', web_pg)
    if matchObj:
        #print matchObj.group(1)
        fp.write('<p>'+matchObj.group(1)+'</p><br>\n')
    fp.write('<a href="#top">Return<br></a>\n')
    return

def writeIndex(fp, reviews):
    fp.write('<div id="menu">\n')
    i = 1
    for review in reviews:
        if "http:" not in review:
            fp.write('<a href="#review'+str(i)+'">'+review+'</a><br>\n')
            i = i + 1
    fp.write("</div>\n")
    return

def main(argv):
    if argv:
        id = argv[0]
        url = "http://www.douban.com/people/"+id+"/reviews"
    else:
        return
    reviews = getReviewList(url)
    # open the output file: blog.html
    outf = "reviews.html"
    fp = open(outf, 'w')
    # put html head
    fp.write("<html>\n")
    fp.write("<head>\n")
    fp.write("<title>My douban reviews page</title>\n")
    fp.write('<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\n')
    fp.write('<link rel="stylesheet" type="text/css" href="styles.css" />\n')
    #fp.write("<style>\n")
    #fp.write("body {background-color:lightgray}\n")
    #fp.write("h2 {color:blue}\n")
    #fp.write("p {color:green;padding:20px;margin:100px;border:1px solid black;}\n")
    #fp.write("</style>\n")
    fp.write("</head>\n")
    fp.write('<body><div id="wrapper">\n')
    fp.write('<a name="top"></a><h1>My Douban Book Reviews</h1>\n')
    writeIndex(fp, reviews)
    fp.write('<div id="context">\n')
    i = 1
    for review in reviews:
        if "http:" in review:
            fetchContent(fp, review)
            fp.write('-------------------------------------<br><br><br><br>\n')
        else:
            fp.write('<a name="review'+str(i)+'"></a><h2>'+review+'</h2><br>\n')
            i = i + 1
    fp.write("</div>\n")
    fp.write("</div></body></html>\n")
    fp.close()

if __name__ == '__main__':
    usage = '''Usage: douban.py <person-id>
    '''
    print usage
    main(sys.argv[1:])
```
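As a quick illustration of the scraping step in `getReviewList`, the sketch below runs the same regular expression against a single made-up line of douban markup; the title/URL pair is appended to the flat `reviews` list in that order, which is why the index and body loops later treat alternating entries as headings and links.

```python
import re

# Hypothetical snippet of the review-list HTML that the scraper walks line by line.
line = '<a title="A Great Read" href="http://book.douban.com/review/1234567/">A Great Read</a>'

# Same pattern as in getReviewList above.
matchObj = re.search(r'a title="(.*)" href="(http://book.douban.com/review/[0-9]+/)', line)
if matchObj:
    print(matchObj.group(1))  # 'A Great Read' -> appended first
    print(matchObj.group(2))  # 'http://book.douban.com/review/1234567/' -> appended right after
```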
{ "source": "jennyzhang0215/dgl", "score": 2 }
#### File: mxnet/gc-mc/data.py ```python import numpy as np import os import re import pandas as pd import scipy.sparse as sp import gluonnlp as nlp import networkx as nx import dgl import mxnet as mx READ_DATASET_PATH = os.path.join("data_set") GENRES_ML_100K =\ ['unknown', 'Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'] GENRES_ML_1M = GENRES_ML_100K[1:] GENRES_ML_10M = GENRES_ML_100K + ['IMAX'] _word_embedding = nlp.embedding.GloVe('glove.840B.300d') _tokenizer = nlp.data.transforms.SpacyTokenizer() class MovieLens(object): def __init__(self, name, ctx, use_one_hot_fea=False, symm=True, test_ratio=0.1, valid_ratio = 0.1): self._name = name self._ctx = ctx self._symm = symm self._test_ratio = test_ratio self._valid_ratio = valid_ratio print("Starting processing {} ...".format(self._name)) self._load_raw_user_info() self._load_raw_movie_info() if self._name == 'ml-100k': self.all_train_rating_info = self._load_raw_rates(os.path.join(READ_DATASET_PATH, self._name, 'u1.base'), '\t') self.test_rating_info = self._load_raw_rates(os.path.join(READ_DATASET_PATH, self._name, 'u1.test'), '\t') self.all_rating_info = pd.concat([self.all_train_rating_info, self.test_rating_info]) elif self._name == 'ml-1m' or self._name == 'ml-10m': self.all_rating_info = self._load_raw_rates(os.path.join(READ_DATASET_PATH, self._name, 'ratings.dat'), '::') num_test = int(np.ceil(self.all_rating_info.shape[0] * self._test_ratio)) shuffled_idx = np.random.permutation(self.all_rating_info.shape[0]) self.test_rating_info = self.all_rating_info.iloc[shuffled_idx[: num_test]] self.all_train_rating_info = self.all_rating_info.iloc[shuffled_idx[num_test: ]] else: raise NotImplementedError num_valid = int(np.ceil(self.all_train_rating_info.shape[0] * self._valid_ratio)) shuffled_idx = np.random.permutation(self.all_train_rating_info.shape[0]) self.valid_rating_info = self.all_train_rating_info.iloc[shuffled_idx[: num_valid]] self.train_rating_info = self.all_train_rating_info.iloc[shuffled_idx[num_valid: ]] print("All rating pairs : {}".format(self.all_rating_info.shape[0])) print("\tAll train rating pairs : {}".format(self.all_train_rating_info.shape[0])) print("\t\tTrain rating pairs : {}".format(self.train_rating_info.shape[0])) print("\t\tValid rating pairs : {}".format(self.valid_rating_info.shape[0])) print("\tTest rating pairs : {}".format(self.test_rating_info.shape[0])) self.user_info = self._drop_unseen_nodes(orign_info=self.user_info, cmp_col_name="id", reserved_ids_set=set(self.all_rating_info["user_id"].values), label="user") self.movie_info = self._drop_unseen_nodes(orign_info=self.movie_info, cmp_col_name="id", reserved_ids_set=set(self.all_rating_info["movie_id"].values), label="movie") # Map user/movie to the global id self.global_user_id_map = {ele: i for i, ele in enumerate(self.user_info['id'])} self.global_movie_id_map = {ele: i for i, ele in enumerate(self.movie_info['id'])} print('Total user number = {}, movie number = {}'.format(len(self.global_user_id_map), len(self.global_movie_id_map))) self._num_user = len(self.global_user_id_map) self._num_movie = len(self.global_movie_id_map) ### Generate features if use_one_hot_fea: self.user_feature = np.eye(self.num_user) self.movie_feature = np.eye(self.num_movie) else: self.user_feature = self._process_user_fea() self.movie_feature = self._process_movie_fea() info_line = "Feature dim: " info_line += "\n{}: 
{}".format(self.name_user, self.user_feature.shape) info_line += "\n{}: {}".format(self.name_movie, self.movie_feature.shape) print(info_line) """ self.train_rating_pairs, self.train_rating_values = self._generate_pair_value(self.train_rating_info) self.valid_rating_pairs, self.valid_rating_values = self._generate_pair_value(self.valid_rating_info) self.test_rating_pairs, self.test_rating_values = self._generate_pair_value(self.test_rating_info) self.train_graph = self._generate_graphs(self.train_rating_pairs, self.train_rating_values, add_support=True) self.train_graph[self.name_user].ndata['fea'] = mx.nd.array(self.user_feature, ctx=ctx, dtype=np.float32) self.train_graph[self.name_movie].ndata['fea'] = mx.nd.array(self.movie_feature, ctx=ctx, dtype=np.float32) """ all_train_rating_pairs, all_train_rating_values = self._generate_pair_value(self.all_train_rating_info) train_rating_pairs, train_rating_values = self._generate_pair_value(self.train_rating_info) valid_rating_pairs, valid_rating_values = self._generate_pair_value(self.valid_rating_info) self.test_rating_pairs, self.test_rating_values = self._generate_pair_value(self.test_rating_info) self.test_graph = self._generate_graphs(all_train_rating_pairs, all_train_rating_values, add_support=True) self.test_graph[self.name_user].ndata['fea'] = mx.nd.array(self.user_feature, ctx=ctx, dtype=np.float32) self.test_graph[self.name_movie].ndata['fea'] = mx.nd.array(self.movie_feature, ctx=ctx, dtype=np.float32) uv_test_graph = self.test_graph[self.name_user, self.name_movie, self.name_edge] vu_test_graph = self.test_graph[self.name_movie, self.name_user, self.name_edge] self.train_graph = self.test_graph.edge_subgraph( {(self.name_user, self.name_movie, self.name_edge): uv_test_graph.edge_ids(train_rating_pairs[0], train_rating_pairs[1]), (self.name_movie, self.name_user, self.name_edge): vu_test_graph.edge_ids(train_rating_pairs[1], train_rating_pairs[0]) }) self.train_graph.copy_from_parent() test2train_g_node_id_map = {} for node_type in [self.name_user, self.name_movie]: test2train_g_node_id_map[node_type] = {} p_nids = self.train_graph.parent_nid(node_type).asnumpy() #print("\t{}: {} nodes".format(node_type, p_nids.size)) for idx, p_nid in enumerate(p_nids): test2train_g_node_id_map[node_type][p_nid] = idx self.train_rating_pairs = (np.array(list(map(test2train_g_node_id_map[self.name_user].get, list(train_rating_pairs[0]))), dtype=np.int64), np.array(list(map(test2train_g_node_id_map[self.name_movie].get, list(train_rating_pairs[1]))), dtype=np.int64)) self.train_rating_values = train_rating_values filtered_valid_user = [] filtered_valid_movie = [] filtered_valid_values = [] for i in range(valid_rating_pairs[0].size): if valid_rating_pairs[0][i] in train_rating_pairs[0] and \ valid_rating_pairs[1][i] in train_rating_pairs[1]: filtered_valid_user.append(test2train_g_node_id_map[self.name_user][valid_rating_pairs[0][i]]) filtered_valid_movie.append(test2train_g_node_id_map[self.name_movie][valid_rating_pairs[1][i]]) filtered_valid_values.append(valid_rating_values[i]) print("Filtered {} validation node pairs".format(valid_rating_pairs[0].size - len(filtered_valid_user))) self.valid_rating_pairs = (np.array(filtered_valid_user, dtype=np.int64), np.array(filtered_valid_movie, dtype=np.int64)) self.valid_rating_values = np.array(filtered_valid_values, dtype=np.float32) #self.uv_train_graph = self.uv_test_graph.edge_subgraph(self.train_rating_pairs) print("Train graph: \t#user:{}\t#movie:{}\t#pairs:{}".format( 
self.train_graph[self.name_user].number_of_nodes(), self.train_graph[self.name_movie].number_of_nodes(), self.train_graph[self.name_user, self.name_movie, self.name_edge].number_of_edges())) print("Test graph: \t#user:{}\t#movie:{}\t#pairs:{}".format( self.test_graph[self.name_user].number_of_nodes(), self.test_graph[self.name_movie].number_of_nodes(), self.test_graph[self.name_user, self.name_movie, self.name_edge].number_of_edges())) def _generate_pair_value(self, rating_info): rating_pairs = (np.array([self.global_user_id_map[ele] for ele in rating_info["user_id"]], dtype=np.int64), np.array([self.global_movie_id_map[ele] for ele in rating_info["movie_id"]], dtype=np.int64)) rating_values = rating_info["rating"].values.astype(np.float32) return rating_pairs, rating_values def _generate_graphs(self, rating_pairs, rating_values, add_support=False): user_movie_ratings_coo = sp.coo_matrix( (rating_values, rating_pairs), shape=(self._num_user, self._num_movie),dtype=np.float32) movie_user_ratings_coo = user_movie_ratings_coo.transpose() user_movie_R = np.zeros((self._num_user, self._num_movie), dtype=np.float32) user_movie_R[rating_pairs] = rating_values movie_user_R = user_movie_R.transpose() graph = dgl.DGLBipartiteGraph( metagraph=nx.MultiGraph([(self.name_user, self.name_movie, self.name_edge), (self.name_movie, self.name_user, self.name_edge)]), number_of_nodes_by_type={self.name_user: self._num_user, self.name_movie: self._num_movie}, edge_connections_by_type={(self.name_user, self.name_movie, self.name_edge): user_movie_ratings_coo, (self.name_movie, self.name_user, self.name_edge): movie_user_ratings_coo}, readonly=True) graph[self.name_user, self.name_movie, self.name_edge].edata['R'] \ = mx.nd.array(rating_values, ctx=self._ctx, dtype=np.float32) if add_support: uv_train_support_l = self.compute_support(user_movie_R, self.num_links, self._symm) for idx, sup in enumerate(uv_train_support_l): graph[self.name_user, self.name_movie, self.name_edge].edges[ np.array(sup.row, dtype=np.int64), np.array(sup.col, dtype=np.int64)].data['support{}'.format(idx)] = \ mx.nd.array(sup.data, ctx=self._ctx, dtype=np.float32) vu_train_support_l = self.compute_support(movie_user_R, self.num_links, self._symm) for idx, sup in enumerate(vu_train_support_l): graph[self.name_movie, self.name_user, self.name_edge].edges[ np.array(sup.row, dtype=np.int64), np.array(sup.col, dtype=np.int64)].data['support{}'.format(idx)] = \ mx.nd.array(sup.data, ctx=self._ctx, dtype=np.float32) return graph @property def possible_rating_values(self): return np.unique(self.train_rating_info["rating"].values) @property def num_links(self): return self.possible_rating_values.size @property def name_user(self): return "user" @property def name_movie(self): return "movie" @property def name_edge(self): return "rating" @property def num_user(self): return self._num_user @property def num_movie(self): return self._num_movie def _drop_unseen_nodes(self, orign_info, cmp_col_name, reserved_ids_set, label): # print(" -----------------") # print("{}: {}(reserved) v.s. 
{}(from info)".format(label, len(reserved_ids_set), # len(set(orign_info[cmp_col_name].values)))) if reserved_ids_set != set(orign_info[cmp_col_name].values): pd_rating_ids = pd.DataFrame(list(reserved_ids_set), columns=["id_graph"]) # print("\torign_info: ({}, {})".format(orign_info.shape[0], orign_info.shape[1])) data_info = orign_info.merge(pd_rating_ids, left_on=cmp_col_name, right_on='id_graph', how='outer') data_info = data_info.dropna(subset=[cmp_col_name, 'id_graph']) data_info = data_info.drop(columns=["id_graph"]) data_info = data_info.reset_index(drop=True) # print("\tAfter dropping, data shape: ({}, {})".format(data_info.shape[0], data_info.shape[1])) return data_info else: orign_info = orign_info.reset_index(drop=True) return orign_info def _load_raw_rates(self, file_path, sep): """In MovieLens, the rates have the following format ml-100k user id \t movie id \t rating \t timestamp ml-1m/10m UserID::MovieID::Rating::Timestamp timestamp is unix timestamp and can be converted by pd.to_datetime(X, unit='s') Parameters ---------- file_path : str Returns ------- rating_info : pd.DataFrame """ rating_info = pd.read_csv( file_path, sep=sep, header=None, names=['user_id', 'movie_id', 'rating', 'timestamp'], dtype={'user_id': np.int32, 'movie_id' : np.int32, 'ratings': np.float32, 'timestamp': np.int64}, engine='python') return rating_info def _load_raw_user_info(self): """In MovieLens, the user attributes file have the following formats: ml-100k: user id | age | gender | occupation | zip code ml-1m: UserID::Gender::Age::Occupation::Zip-code For ml-10m, there is no user information. We read the user id from the rating file. Parameters ---------- name : str Returns ------- user_info : pd.DataFrame """ if self._name == 'ml-100k': self.user_info = pd.read_csv(os.path.join(READ_DATASET_PATH, self._name, 'u.user'), sep='|', header=None, names=['id', 'age', 'gender', 'occupation', 'zip_code'], engine='python') elif self._name == 'ml-1m': self.user_info = pd.read_csv(os.path.join(READ_DATASET_PATH, self._name, 'users.dat'), sep='::', header=None, names=['id', 'gender', 'age', 'occupation', 'zip_code'], engine='python') elif self._name == 'ml-10m': rating_info = pd.read_csv( os.path.join(READ_DATASET_PATH, self._name, 'ratings.dat'), sep='::', header=None, names=['user_id', 'movie_id', 'rating', 'timestamp'], dtype={'user_id': np.int32, 'movie_id': np.int32, 'ratings': np.float32, 'timestamp': np.int64}, engine='python') self.user_info = pd.DataFrame(np.unique(rating_info['user_id'].values.astype(np.int32)), columns=['id']) else: raise NotImplementedError def _process_user_fea(self): """ Parameters ---------- user_info : pd.DataFrame name : str For ml-100k and ml-1m, the column name is ['id', 'gender', 'age', 'occupation', 'zip_code']. We take the age, gender, and the one-hot encoding of the occupation as the user features. For ml-10m, there is no user feature and we set the feature to be a single zero. 
Returns ------- user_features : np.ndarray """ if self._name == 'ml-100k' or self._name == 'ml-1m': ages = self.user_info['age'].values.astype(np.float32) gender = (self.user_info['gender'] == 'F').values.astype(np.float32) all_occupations = set(self.user_info['occupation']) occupation_map = {ele: i for i, ele in enumerate(all_occupations)} occupation_one_hot = np.zeros(shape=(self.user_info.shape[0], len(all_occupations)), dtype=np.float32) occupation_one_hot[np.arange(self.user_info.shape[0]), np.array([occupation_map[ele] for ele in self.user_info['occupation']])] = 1 user_features = np.concatenate([ages.reshape((self.user_info.shape[0], 1)) / 50.0, gender.reshape((self.user_info.shape[0], 1)), occupation_one_hot], axis=1) elif self._name == 'ml-10m': user_features = np.zeros(shape=(self.user_info.shape[0], 1), dtype=np.float32) else: raise NotImplementedError return user_features def _load_raw_movie_info(self): """In MovieLens, the movie attributes may have the following formats: In ml_100k: movie id | movie title | release date | video release date | IMDb URL | [genres] In ml_1m, ml_10m: MovieID::Title (Release Year)::Genres Also, Genres are separated by |, e.g., Adventure|Animation|Children|Comedy|Fantasy Parameters ---------- name : str Returns ------- movie_info : pd.DataFrame For ml-100k, the column name is ['id', 'title', 'release_date', 'video_release_date', 'url'] + [GENRES (19)]] For ml-1m and ml-10m, the column name is ['id', 'title'] + [GENRES (18/20)]] """ if self._name == 'ml-100k': GENRES = GENRES_ML_100K elif self._name == 'ml-1m': GENRES = GENRES_ML_1M elif self._name == 'ml-10m': GENRES = GENRES_ML_10M else: raise NotImplementedError if self._name == 'ml-100k': file_path = os.path.join(READ_DATASET_PATH, self._name, 'u.item') self.movie_info = pd.read_csv(file_path, sep='|', header=None, names=['id', 'title', 'release_date', 'video_release_date', 'url'] + GENRES, engine='python') elif self._name == 'ml-1m' or self._name == 'ml-10m': file_path = os.path.join(READ_DATASET_PATH, self._name, 'movies.dat') movie_info = pd.read_csv(file_path, sep='::', header=None, names=['id', 'title', 'genres'], engine='python') genre_map = {ele: i for i, ele in enumerate(GENRES)} genre_map['Children\'s'] = genre_map['Children'] genre_map['Childrens'] = genre_map['Children'] movie_genres = np.zeros(shape=(movie_info.shape[0], len(GENRES)), dtype=np.float32) for i, genres in enumerate(movie_info['genres']): for ele in genres.split('|'): if ele in genre_map: movie_genres[i, genre_map[ele]] = 1.0 else: print('genres not found, filled with unknown: {}'.format(genres)) movie_genres[i, genre_map['unknown']] = 1.0 for idx, genre_name in enumerate(GENRES): assert idx == genre_map[genre_name] movie_info[genre_name] = movie_genres[:, idx] self.movie_info = movie_info.drop(columns=["genres"]) else: raise NotImplementedError def _process_movie_fea(self): """ Parameters ---------- movie_info : pd.DataFrame name : str Returns ------- movie_features : np.ndarray Generate movie features by concatenating embedding and the year """ if self._name == 'ml-100k': GENRES = GENRES_ML_100K elif self._name == 'ml-1m': GENRES = GENRES_ML_1M elif self._name == 'ml-10m': GENRES = GENRES_ML_10M else: raise NotImplementedError title_embedding = np.zeros(shape=(self.movie_info.shape[0], 300), dtype=np.float32) release_years = np.zeros(shape=(self.movie_info.shape[0], 1), dtype=np.float32) p = re.compile(r'(.+)\s*\((\d+)\)') for i, title in enumerate(self.movie_info['title']): match_res = p.match(title) if match_res is 
None: print('{} cannot be matched, index={}, name={}'.format(title, i, self._name)) title_context, year = title, 1950 else: title_context, year = match_res.groups() # We use average of glove title_embedding[i, :] =_word_embedding[_tokenizer(title_context)].asnumpy().mean(axis=0) release_years[i] = float(year) movie_features = np.concatenate((title_embedding, (release_years - 1950.0) / 100.0, self.movie_info[GENRES]), axis=1) return movie_features def compute_support(self, adj, num_links, symmetric): adj_unnormalized_l = [] adj_train_int = sp.csr_matrix(adj, dtype=np.int32) for i in range(num_links): # build individual binary rating matrices (supports) for each rating adj_unnormalized = sp.csr_matrix(adj_train_int == i + 1, dtype=np.float32) adj_unnormalized_l.append(adj_unnormalized) # degree_u and degree_v are row and column sums of adj+I adj_tot = np.sum(adj for adj in adj_unnormalized_l) ## it is just the original training adj degree_u = np.asarray(adj_tot.sum(1)).flatten() degree_v = np.asarray(adj_tot.sum(0)).flatten() # set zeros to inf to avoid dividing by zero degree_u[degree_u == 0.] = np.inf degree_v[degree_v == 0.] = np.inf degree_u_inv_sqrt = 1. / np.sqrt(degree_u) degree_v_inv_sqrt = 1. / np.sqrt(degree_v) degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0]) degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0]) degree_u_inv = degree_u_inv_sqrt_mat.dot(degree_u_inv_sqrt_mat) support_sp_l = [] for adj in adj_unnormalized_l: if symmetric: sup = sp.coo_matrix(degree_u_inv_sqrt_mat.dot(adj).dot(degree_v_inv_sqrt_mat)) else: sup = sp.csr_matrix(degree_u_inv.dot(adj)) print("sup.data", sup.nnz) support_sp_l.append(sup) return support_sp_l if __name__ == '__main__': MovieLens("ml-100k", ctx=mx.gpu(0), symm=True) MovieLens("ml-100k", ctx=mx.gpu(0), symm=False) MovieLens("ml-1m", ctx=mx.gpu(0), symm=True) MovieLens("ml-1m", ctx=mx.gpu(0), symm=False) ``` #### File: mxnet/gcmc/graph.py ```python import numpy as np import mxnet as mx import numpy.testing as npt import scipy.sparse as sp import io import mxgraph._graph_sampler as _graph_sampler import pandas as pd import os import json try: import cPickle as pickle except ImportError: import pickle def npy_seg_mul(lhs, ind_ptr, rhs): """ ret[ind_ptr[i]:ind_ptr[i+1]] = lhs[ind_ptr[i]:ind_ptr[i+1]] * rhs[i] Parameters ---------- lhs : np.ndarray ind_ptr : np.ndarray rhs : np.ndarray Returns ------- ret : np.ndarray """ return _graph_sampler.seg_mul(lhs, ind_ptr, rhs) def npy_seg_add(lhs, ind_ptr, rhs): """ ret[ind_ptr[i]:ind_ptr[i+1]] = lhs[ind_ptr[i]:ind_ptr[i+1]] + rhs[i] Parameters ---------- lhs : np.ndarray ind_ptr : np.ndarray rhs : np.ndarray Returns ------- ret : np.ndarray """ return _graph_sampler.seg_add(lhs, ind_ptr, rhs) def npy_seg_sum(data, ind_ptr): """ ret[i] = data[ind_ptr[i]:ind_ptr[i+1]].sum() Parameters ---------- data : np.ndarray ind_ptr : np.ndarray Returns ------- ret : np.ndarray """ return _graph_sampler.seg_sum(data, ind_ptr) def take1d(data, sel): return _graph_sampler.take_1d_omp(np.ascontiguousarray(data), np.ascontiguousarray(sel, dtype=np.int32)) def unordered_unique(data, return_counts=False, return_inverse=False): if return_counts: return _graph_sampler.unique_cnt(np.ascontiguousarray(data).astype(np.int32)) if return_inverse: return _graph_sampler.unique_inverse(np.ascontiguousarray(data).astype(np.int32)) raise NotImplementedError def set_seed(seed): """Set the random seed of the inner sampling handler Parameters ---------- seed : int Returns ------- ret : bool """ return 
_graph_sampler.set_seed(seed) def _gen_edge_row_indices_by_indptr(ind_ptr, nnz): """ Generate the row_indices in the COO format based on the indptr nnz = 7 ind_ptr = [0, 2, 5, 7] edge_row_indices = [0, 0, 1, 1, 1, 2, 2] Parameters ---------- ind_ptr : np.ndarray nnz : int Returns ------- edge_row_indices : np.ndarray """ return _graph_sampler.gen_row_indices_by_indptr(ind_ptr.astype(np.int32), nnz) def _shallow_copy_stacked_dict(dic): new_dict = {} for k1 in dic: new_dict[k1] = {} for k2 in dic[k1]: new_dict[k1][k2] = dic[k1][k2] return new_dict class NodeIDRMap(object): def __init__(self, node_ids): self._rmap = dict() for i, node_id in enumerate(node_ids): self._rmap[node_id] = i def __getitem__(self, node_ids): if isinstance(node_ids, (np.ndarray, list, tuple)): return np.array(list(map(lambda ele: self._rmap[ele], node_ids)), dtype=np.int32) else: return self._rmap[node_ids] class NodeIDRMapFast(object): def __init__(self, node_ids): """ Parameters ---------- node_ids : np.ndarray For example: node_ids = [5, 9, 6, 12] _rmap = [ 0, 2, -1, -1, 1, -1, -1, 3] 5| 6| 9| 12| """ self._node_id_min = node_ids.min() self._node_id_max = node_ids.max() self._rmap = -1 * np.ones(self._node_id_max - self._node_id_min + 1, dtype=np.int32) self._rmap[node_ids - self._node_id_min] = np.arange(node_ids.size, dtype=np.int32) def __getitem__(self, node_ids): return self._rmap[node_ids - self._node_id_min] def merge_nodes(node_ids): """ Parameters ---------- node_ids : list of np.ndarray or np.ndarray Returns ------- uniq_node_ids : np.ndarray indices : list of np.ndarray or np.ndarray """ if isinstance(node_ids, np.ndarray): return unordered_unique(node_ids, return_inverse=True) else: uniq_node_ids, all_indices = unordered_unique(np.concatenate(node_ids, axis=0), return_inverse=True) indices = [] begin = 0 for ele in node_ids: indices.append(all_indices[begin:(begin + ele.size)]) begin += ele.size return uniq_node_ids, indices def merge_node_ids_dict(data): """ Parameters ---------- data : tuple/list of dict There are two possible situations: 1) {key: indices}, the ``indices'' has shape (#nodes,) 2) {(src_key, dst_key): indices}, the ``indices'' has shape (1 + K, #nodes) Returns ------- uniq_node_ids_dict : dict new_idx_dict_l : list of dict """ uniq_node_ids_dict = dict() new_idx_dict_l = [] all_ids_dict = dict() for ele_dict in data: for key, indices in ele_dict.items(): if isinstance(key, tuple): assert ele_dict[key].ndim == 2 src_key, dst_key = key if src_key in all_ids_dict: all_ids_dict[src_key].append(indices[0, :]) else: all_ids_dict[src_key] = [indices[0, :]] if dst_key in all_ids_dict: all_ids_dict[dst_key].append(indices[1:, :].reshape((-1,))) else: all_ids_dict[dst_key] = [indices[1:, :].reshape((-1,))] else: if key in all_ids_dict: all_ids_dict[key].append(indices) else: all_ids_dict[key] = [indices] counter = {key: 0 for key in all_ids_dict} for key, node_ids in all_ids_dict.items(): uniq_node_ids_dict[key], all_ids_dict[key] = merge_nodes(node_ids) for ele_dict in data: new_idx_dict = dict() for key, indices in ele_dict.items(): if isinstance(key, tuple): src_key, dst_key = key src_new_indices = all_ids_dict[src_key][counter[src_key]].reshape(indices[0:1, :].shape) dst_new_indices = all_ids_dict[dst_key][counter[dst_key]].reshape(indices[1:, :].shape) new_idx_dict[key] = np.concatenate([src_new_indices, dst_new_indices], axis=0) counter[src_key] += 1 counter[dst_key] += 1 else: new_idx_dict[key] = all_ids_dict[key][counter[key]] counter[key] += 1 new_idx_dict_l.append(new_idx_dict) return 
uniq_node_ids_dict, new_idx_dict_l def empty_as_zero(l, dtype): return [ele.astype(dtype) if ele.size > 0 else np.zeros(shape=(1,), dtype=dtype) for ele in l] class NodeFeatures(object): """A simple wrapper for node features/states """ def __init__(self, data, node_ids): """Initialize the NodeFeature object Parameters ---------- data : mx.nd.ndarray Shape (#Node, C) node_ids : np.ndarray Shape (#Node) """ self._ctx = data.context self.data = data self.node_ids = node_ids self._node_id_rmap = None def __repr__(self): stream = io.StringIO() sprint = lambda *args: print(*args, file=stream) sprint('NodeFeatures(') sprint('data=') sprint(self.data) sprint('node_ids=') with np.printoptions(precision=3, suppress=True): sprint(self.node_ids) sprint(')') return stream.getvalue() def take_by_id(self, sel_node_ids): if self._node_id_rmap is None: self._node_id_rmap = NodeIDRMapFast(self.node_ids) node_inds = mx.nd.array(self._node_id_rmap[sel_node_ids], dtype=np.int32, ctx=self._ctx) return NodeFeatures(mx.nd.take(self.data, node_inds, axis=0), sel_node_ids) class CSRMat(object): """A simple wrapper of the CSR Matrix. We can view it as a bipartite graph Apart from the traditoinal CSR format, we use two additional arrays: row_ids and col_ids to track the original ids of the row/col indices We use the C++ API to accelerate the speed if possible """ def __init__(self, end_points, ind_ptr, row_ids, col_ids, values=None, multi_link=None, force_contiguous=True): """Initialize the CSRMat Parameters ---------- end_points : np.ndarray The end_points of the edges. shape (nnz,) ind_ptr : np.ndarray The starting point in end_points row_ids : np.ndarray col_ids : np.ndarray values : np.ndarray Values on the edge multi_link : None or list-like object The multi-link structure of the csr matrix. This indicates the possible values of the edges. 
For example, there are 3 possible ratings, 0.5, 1.0, 1.5 between user and item, we can tell CSRMat about this by setting ```graph = CSRMat(multi_link=[0.5, 1.0, 1.5])``` force_contiguous : bool Whether to force the end_points, ind_ptr and other elements as contiguous arrays """ assert ind_ptr[0] == 0 and ind_ptr[-1] == end_points.shape[0] self.end_points = end_points self.ind_ptr = ind_ptr self.values = np.ones(shape=self.end_points.shape, dtype=np.float32) if values is None\ else values.astype(np.float32) self.multi_link = np.sort(multi_link) if multi_link is not None else None self.row_ids = row_ids self.col_ids = col_ids assert self.ind_ptr.size == len(self.row_ids) + 1 if force_contiguous: self.end_points = np.ascontiguousarray(self.end_points, dtype=np.int32) self.ind_ptr = np.ascontiguousarray(self.ind_ptr, dtype=np.int32) if self.values is not None: self.values = np.ascontiguousarray(self.values, dtype=np.float32) self.row_ids = np.ascontiguousarray(self.row_ids, dtype=np.int32) self.col_ids = np.ascontiguousarray(self.col_ids, dtype=np.int32) self._node_pair_indices = None self._node_pair_ids = None self._row_id_rmap = NodeIDRMapFast(self.row_ids) self._col_id_rmap = NodeIDRMapFast(self.col_ids) self._cached_spy_csr = None self._cached_row_degrees = None self._cached_col_degrees = None self._cached_support = dict() def save_edges_txt(self, fname): with open(fname, 'w') as f: for row_id, col_id, value in zip(self.node_pair_ids[0], self.node_pair_ids[1], self.values): f.write('{}\t{}\t{:g}\n'.format(row_id, col_id, value)) def to_spy(self): """Convert to the scipy csr matrix Returns ------- ret : sp.csr_matrix """ if self._cached_spy_csr is None: self._cached_spy_csr = sp.csr_matrix((self.values, self.end_points, self.ind_ptr), shape=(self.row_ids.size, self.col_ids.size)) return self._cached_spy_csr @staticmethod def from_spy(mat): """ Parameters ---------- mat : sp.csr_matrix Returns ------- ret : CSRMat """ return CSRMat(end_points=mat.indices, ind_ptr=mat.indptr, row_ids=np.arange(mat.shape[0], dtype=np.int32), col_ids=np.arange(mat.shape[1], dtype=np.int32), values=mat.data.astype(np.float32), force_contiguous=True) @property def size(self): return self.end_points.size @property def nnz(self): return self.values.size @property def shape(self): return self.row_ids.size, self.col_ids.size @property def node_pair_indices(self): """ Return row & col indices of the edges Returns ------- ret : np.ndarray Shape (2, TOTAL_EDGE_NUM) each has row, col """ if self._node_pair_indices is None: self._node_pair_indices =\ np.stack([_gen_edge_row_indices_by_indptr(self.ind_ptr, self.nnz), self.end_points], axis=0) return self._node_pair_indices @property def node_pair_ids(self): """ Return row & col ids of the edges Returns ------- ret : np.ndarray Shape (2, TOTAL_EDGE_NUM) each has row, col """ if self._node_pair_ids is None: node_pair_indices = self.node_pair_indices self._node_pair_ids = np.stack([self.row_ids[node_pair_indices[0]], self.col_ids[node_pair_indices[1]]], axis=0) return self._node_pair_ids @property def row_degrees(self): if self._cached_row_degrees is None: self._cached_row_degrees = np.ascontiguousarray(self.ind_ptr[1:] - self.ind_ptr[:-1]) return self._cached_row_degrees @property def col_degrees(self): if self._cached_col_degrees is None: self._cached_col_degrees = np.zeros(shape=len(self.col_ids), dtype=np.int32) uniq_col_indices, cnt = unordered_unique(self.end_points.astype(np.int32), return_counts=True) self._cached_col_degrees[uniq_col_indices] = cnt return 
self._cached_col_degrees def get_support(self, symm=True): key = symm if key in self._cached_support: return self._cached_support[key] else: if symm: col_degrees = self.col_degrees else: col_degrees = np.zeros(shape=self.col_ids.shape, dtype=np.int32) support = _graph_sampler.get_support(self.row_degrees.astype(np.int32), col_degrees, self.ind_ptr.astype(np.int32), self.end_points.astype(np.int32), int(symm)) self._cached_support[key] = support return support def row_id_to_ind(self, node_ids): """Maps node ids back to row indices in the CSRMat Parameters ---------- node_ids : np.ndarray or list or tuple or int Returns ------- ret : np.ndarray """ # if isinstance(node_ids, (np.ndarray, list, tuple)): # return np.array(list(map(lambda ele: self._row_id_reverse_mapping[ele], node_ids)), # dtype=np.int32) # else: return self._row_id_rmap[node_ids] def col_id_to_ind(self, node_ids): """Maps node ids back to col indices in the CSRMat Parameters ---------- node_ids : np.ndarray or list or tuple or int Returns ------- ret : np.ndarray """ # if isinstance(node_ids, (np.ndarray, list, tuple)): # return np.array(list(map(lambda ele: self._col_id_reverse_mapping[ele], node_ids)), # dtype=np.int32) # else: return self._col_id_rmap[node_ids] def save(self, fname): if self.multi_link is None: return np.savez_compressed(fname, row_ids=self.row_ids, col_ids=self.col_ids, values=self.values, end_points=self.end_points, ind_ptr=self.ind_ptr) else: return np.savez_compressed(fname, row_ids=self.row_ids, col_ids=self.col_ids, values=self.values, end_points=self.end_points, ind_ptr=self.ind_ptr, multi_link=self.multi_link) @staticmethod def load(fname): data = np.load(fname) multi_link = None if 'multi_link' not in data else data['multi_link'][:] return CSRMat(row_ids=data['row_ids'][:], col_ids=data['col_ids'][:], values=data['values'][:], multi_link=multi_link, end_points=data['end_points'][:], ind_ptr=data['ind_ptr'][:]) def submat(self, row_indices=None, col_indices=None): """Get the submatrix of the corresponding row/col indices Parameters ---------- row_indices : np.ndarray or None col_indices : np.ndarray or None Returns ------- ret : CSRMat """ if row_indices is None: row_indices = None else: if not isinstance(row_indices, np.ndarray): row_indices = np.array([row_indices], dtype=np.int32) else: row_indices = np.ascontiguousarray(row_indices, dtype=np.int32) if col_indices is None: col_indices = None else: if not isinstance(col_indices, np.ndarray): col_indices = np.array([col_indices], dtype=np.int32) else: col_indices = np.ascontiguousarray(col_indices, dtype=np.int32) dst_end_points, dst_values, dst_ind_ptr, dst_row_ids, dst_col_ids\ = _graph_sampler.csr_submat(np.ascontiguousarray(self.end_points.astype(np.int32), dtype=np.int32), np.ascontiguousarray(self.values), np.ascontiguousarray(self.ind_ptr.astype(np.int32), dtype=np.int32), np.ascontiguousarray(self.row_ids, dtype=np.int32), np.ascontiguousarray(self.col_ids, dtype=np.int32), row_indices, col_indices) return CSRMat(end_points=dst_end_points, ind_ptr=dst_ind_ptr, row_ids=dst_row_ids, col_ids=dst_col_ids, values=dst_values, multi_link=self.multi_link) def submat_by_id(self, row_ids=None, col_ids=None): row_indices = None if row_ids is None else self.row_id_to_ind(row_ids) col_indices = None if col_ids is None else self.col_id_to_ind(col_ids) return self.submat(row_indices, col_indices) def sample_submat(self, row_indices=None, ncols=5): """ Sample a random number of columns WITHOUT replacement for each row and form a new csr_mat Parameters 
---------- row_indices : np.ndarray or None ncols : int or None None means to sample all columns Returns ------- ret : CSRMat """ if ncols is None: return self.submat(row_indices=row_indices, col_indices=None) if row_indices is None: row_indices = np.arange(self.shape[0], dtype=np.int32) sampled_indices, dst_ind_ptr \ = _graph_sampler.random_sample_fix_neighbor(self.ind_ptr.astype(np.int32), row_indices.astype(np.int32), ncols) dst_end_points = self.end_points[sampled_indices] uniq_col_indices, dst_end_points = unordered_unique(dst_end_points, return_inverse=True) return CSRMat(end_points=dst_end_points, ind_ptr=dst_ind_ptr, row_ids=self.row_ids[row_indices], col_ids=self.col_ids[uniq_col_indices], values=self.values[sampled_indices], multi_link=self.multi_link) def sample_submat_by_id(self, row_ids=None, ncols=5): """ Sample a random number of columns WITHOUT replacement for each row and form a new csr_mat. This function select the rows by the row_ids Parameters ---------- row_ids : np.ndarray or None ncols : int or None Returns ------- ret : CSRMat """ return self.sample_submat(self.row_id_to_ind(row_ids), ncols) @property def T(self): new_csr_mat = self.to_spy().T.tocsr() return CSRMat(end_points=new_csr_mat.indices, ind_ptr=new_csr_mat.indptr, values=new_csr_mat.data, row_ids=self.col_ids, col_ids=self.row_ids, multi_link=self.multi_link) def fetch_edges_by_ind(self, node_pair_indices): """Select edge values based on the indices of the node pairs Parameters ---------- node_pair_ind : np.ndarray Shape (2, SEL_EDGE_NUM) Returns ------- ret : np.ndarray Shape (SEL_EDGE_NUM,) """ ### TODO change .A1? to data ret = self.to_spy()[node_pair_indices[0, :], node_pair_indices[1, :]] if ret.size == 0: return np.ndarray([]) else: return np.array(ret).reshape((-1,)) def fetch_edges_by_id(self, node_pair_ids): """Select edge values based on the ids of node pairs Parameters ---------- node_pair_ids : np.ndarray Shape (2, SEL_EDGE_NUM) Returns ------- ret : np.ndarray Shape (SEL_EDGE_NUM,) """ return self.fetch_edges_by_ind(np.stack([self.row_id_to_ind(node_pair_ids[0]), self.col_id_to_ind(node_pair_ids[1])])) def remove_edges_by_ind(self, node_pair_indices): """ Parameters ---------- node_pair_indices : np.ndarray Shape (2, REMOVE_EDGE_NUM) Returns ------- ret : CSRMat The new CSRMat after removing these edges """ row_indices, col_indices = np.ascontiguousarray(node_pair_indices[0], dtype=np.int32),\ np.ascontiguousarray(node_pair_indices[1], dtype=np.int32) dst_end_points, dst_values, dst_indptr =\ _graph_sampler.remove_edges_by_indices(self.end_points.astype(np.int32), self.values, self.ind_ptr.astype(np.int32), row_indices.astype(np.int32), col_indices.astype(np.int32)) return CSRMat(end_points=dst_end_points, ind_ptr=dst_indptr, values=dst_values, row_ids=self.row_ids, col_ids=self.col_ids, multi_link=self.multi_link, force_contiguous=True) def remove_edges_by_id(self, node_pair_ids): """ Parameters ---------- node_pair_ids : np.ndarray Shape (2, REMOVE_EDGE_NUM) Returns ------- ret : CSRMat The new CSRMat after removing these edges """ row_ids, col_ids = node_pair_ids[0], node_pair_ids[1] return self.remove_edges_by_ind(np.stack((self.row_id_to_ind(row_ids), self.col_id_to_ind(col_ids)))) def sample_neighbors(self, src_ids=None, symm=True, use_multi_link=True, num_neighbors=None): """ Fetch the ids of the columns that are connected to the src_node Parameters ---------- src_ids : np.ndarray or None None indicates to select all the src_ids. 
It will have the same value as csr_mat.row_ids symm : bool Whether to use the symmetric formulation to calculate the support use_multi_link : bool Whether to sample multiple edge_values num_neighbors : int or None Number of neighbors to sample. None or a negative number indicates to sample all neighborhoods Returns ------- end_points_ids : list or np.ndarray - use_multi_link is False: Ids of the neighboring node that are connected to the source nodes. - use_multi_link is True: The output will be a list. The i-th element will contain the dst_ids that has the i-th possible edge values with the src_ids edge_values : list or np.ndarray - use_multi_link is False: Edge values between the chosen dst_ids and src_ids - use_multi_link is True: List of edge values corresponding to dst_ids ind_ptr : list or np.ndarray - use_multi_link is False: dst_ids[dst_ind_ptr[i]:dst_ind_ptr[i+1]] are connected to src_ids[i] - use_multi_link is True: List of ind_ptrs corresponding to dst_ids support : list or np.ndarray - use_multi_link is False: The support value of the edges. If `symm` is True, it's \sqrt(D(src) D(dst)) Otherwise, it's D(src) - use_multi_link is True List of support corresponding to dst_ids """ if src_ids is not None: src_inds = self.row_id_to_ind(src_ids) else: src_inds = np.arange(self.shape[0], dtype=np.int32) if num_neighbors is None: num_neighbors = -1 # The C++ implementation will sample all possible neighbors if num_neighbors is < 0. sampled_indices, dst_ind_ptr \ = _graph_sampler.random_sample_fix_neighbor(self.ind_ptr.astype(np.int32), src_inds.astype(np.int32), num_neighbors) dst_end_points_ids = np.take(self.col_ids, np.take(self.end_points, sampled_indices)) edge_values = np.take(self.values, sampled_indices) support = np.take(self.get_support(symm), sampled_indices) if not use_multi_link: return dst_end_points_ids, edge_values, dst_ind_ptr, support else: assert self.multi_link is not None split_indices, dst_ind_ptr_l = _graph_sampler.multi_link_split(edge_values, dst_ind_ptr, self.multi_link) dst_end_points_ids_l = [] edge_values_l = [] support_l = [] for sel_idx in split_indices: ele_dst_end_points_ids = np.take(dst_end_points_ids, sel_idx) ele_edge_values = np.take(edge_values, sel_idx) ele_support = np.take(support, sel_idx) dst_end_points_ids_l.append(ele_dst_end_points_ids) edge_values_l.append(ele_edge_values) support_l.append(ele_support) return dst_end_points_ids_l, edge_values_l, dst_ind_ptr_l, support_l def check_consistency(self): for i in range(len(self.ind_ptr) - 1): ele_end_points = self.end_points[self.ind_ptr[i]:self.ind_ptr[i+1]] if np.unique(ele_end_points).shape != ele_end_points.shape: raise ValueError('Found duplicates in end_points, i={}'.format(i)) def issubmat(self, large_mat): """ Check whether the matrix is a submatrix of large_mat Parameters ---------- large_mat : CSRMat Returns ------- ret : bool True or False """ for i, row_id in enumerate(self.row_ids): lmat_row_idx = large_mat.row_id_to_ind(row_id) all_end_points = large_mat.end_points[large_mat.ind_ptr[lmat_row_idx]:large_mat.ind_ptr[lmat_row_idx + 1]] all_end_point_ids = large_mat.col_ids[all_end_points] all_values = large_mat.values[large_mat.ind_ptr[lmat_row_idx]:large_mat.ind_ptr[lmat_row_idx + 1]] all_end_point_ids_value = {eid: val for eid, val in zip(all_end_point_ids, all_values)} sub_end_points_ids = self.col_ids[self.end_points[self.ind_ptr[i]:self.ind_ptr[i + 1]]] sub_values = self.values[self.ind_ptr[i]:self.ind_ptr[i + 1]] if not set(sub_end_points_ids).issubset(set(all_end_point_ids)): 
return False for eid, val in zip(sub_end_points_ids, sub_values): if all_end_point_ids_value[eid] != val: return False if (large_mat.multi_link is None and self.multi_link is not None) or\ (self.multi_link is None and large_mat.multi_link is not None): return False if len(large_mat.multi_link) != len(self.multi_link): return False for lhs, rhs in zip(large_mat.multi_link, self.multi_link): if lhs != rhs: return False return True def summary(self): print(self) def __repr__(self): info_str = "CSRMat:" + \ "\n Row={}, Col={}, NNZ={}".format(self.row_ids.size, self.col_ids.size, self.end_points.size) if self.multi_link is not None: info_str += '\n Multi Link={}'.format(self.multi_link) return info_str class HeterGraph(object): def __init__(self, features, node_ids=None, csr_mat_dict=None, **kwargs): """ Parameters ---------- features : dict {node_key : np.ndarray (#node, fea_dim)} node_ids : dict or None {node_key : np.ndarray (#node, )} csr_mat_dict : dict The connection between two types of nodes. Contains: {(node_key1, node_key2) : CSRMat} IMPORTANT! We allow node_key1 == node_key2, which indicates self-link, e.g., user-user """ self.features = features self.node_ids = node_ids if self.node_ids is None: self.node_ids = {} for key, features in self.features.items(): self.node_ids[key] = np.arange(features.shape[0], dtype=np.int32) else: assert sorted(self.features.keys()) == sorted(self.node_ids.keys()) for k, node_ids in self.node_ids.items(): assert node_ids.shape[0] == self.features[k].shape[0] # Generate node_id_to_ind mappings self._node_id_rmaps = dict() for k, node_ids in self.node_ids.items(): self._node_id_rmaps[k] = NodeIDRMapFast(node_ids) if 'meta_graph' not in kwargs: self.meta_graph = dict() for key in self.features: self.meta_graph[key] = dict() else: self.meta_graph = kwargs['meta_graph'] if '_csr_matrices' not in kwargs: self.csr_matrices = dict() for key in self.features: self.csr_matrices[key] = dict() for (node_key1, node_key2), mat in csr_mat_dict.items(): assert node_key1 in self.meta_graph,\ '{} not found!, meta_graph_nodes={}'.format(node_key1, self.meta_graph) assert node_key2 in self.meta_graph, \ '{} not found!, meta_graph_nodes={}'.format(node_key2, self.meta_graph) self.meta_graph[node_key1][node_key2] = 1 self.meta_graph[node_key2][node_key1] = 1 self.csr_matrices[node_key1][node_key2] = mat if node_key2 != node_key1: self.csr_matrices[node_key2][node_key1] = mat.T else: assert mat.shape[0] == mat.shape[1],\ '{} -> {} must be a square matrix'.format(node_key1, node_key2) else: self.csr_matrices = kwargs['_csr_matrices'] def check_continous_node_ids(self): for key, ele in self.node_ids.items(): np.testing.assert_allclose(np.sort(ele), np.arange(len(ele), dtype=np.int32)) def features_by_id(self, key, node_ids): """ Select a subset of the features indexed by the given node_ids Parameters ---------- key : str Name of the node node_ids : np.ndarray IDs of the nodes to select Returns ------- sub_features : np.ndarray Output """ return self.features[key][self._node_id_rmaps[key][node_ids]] @property def node_names(self): return self.features.keys() @property def node_id_rmaps(self): return self._node_id_rmaps def get_multi_link_structure(self): multi_link_structure = {} for src_key in self.csr_matrices: for dst_key, mat in self.csr_matrices[src_key].items(): multi_link_structure[(src_key, dst_key)] = \ len(mat.multi_link) if mat.multi_link is not None else None return multi_link_structure def save(self, dir_name): if not os.path.exists(dir_name): os.makedirs(dir_name) 
with open(os.path.join(dir_name, 'meta_graph.json'), 'w') as f: json.dump(self.meta_graph, f) for key, features in self.features.items(): np.savez_compressed(os.path.join(dir_name, '{}.npz'.format(key)), node_ids=self.node_ids[key], features=features.astype(np.float32)) cached_edge = set() for k1 in self.meta_graph: for k2 in self.meta_graph[k1]: if (k1, k2) in cached_edge: continue cached_edge.add((k1, k2)) cached_edge.add((k2, k1)) self.csr_matrices[k1][k2].save(os.path.join(dir_name, '{}_{}_csr.npz'.format(k1, k2))) def node_id_to_ind(self, key, node_ids): return self._node_id_rmaps[key][node_ids] def fetch_edges_by_id(self, src_key, dst_key, node_pair_ids): """ Parameters ---------- src_key : str dst_key : str node_pair_ids : np.ndarray Shape (2, SEL_EDGE_NUM) Returns ------- edge_values : np.ndarray """ return self.csr_matrices[src_key][dst_key].fetch_edges_by_id(node_pair_ids) def fetch_edges_by_ind(self, src_key, dst_key, node_pair_indices): """ Parameters ---------- src_key : str dst_key : str node_pair_indices : np.ndarray Shape (2, SEL_EDGE_NUM) Returns ------- edge_values : np.ndarray """ return self.csr_matrices[src_key][dst_key].fetch_edges_by_ind(node_pair_indices) def remove_edges_by_id(self, src_key, dst_key, node_pair_ids): """ Get a new heterogenous graph after the given edges are removed Parameters ---------- src_key : str dst_key : str node_pair_ids : np.ndarray Shape (2, REMOVE_EDGE_NUM) Returns ------- ret : HeterGraph """ new_csr_matrices = _shallow_copy_stacked_dict(self.csr_matrices) new_csr_matrices[src_key][dst_key] =\ self.csr_matrices[src_key][dst_key].remove_edges_by_id(node_pair_ids) new_csr_matrices[dst_key][src_key] = \ self.csr_matrices[dst_key][src_key].remove_edges_by_id(np.flipud(node_pair_ids)) return HeterGraph(features=self.features, node_ids=self.node_ids, meta_graph=self.meta_graph, _csr_matrices=new_csr_matrices) def remove_edges_by_ind(self, src_key, dst_key, node_pair_indices): """ Parameters ---------- src_key : str dst_key : str node_pair_indices : np.ndarray Returns ------- ret : HeterGraph """ # IMPORTANT! We cannot use copy in the following. Because we have a # nested dictionary structure, directly call .copy() will share the reference. 
new_csr_matrices = _shallow_copy_stacked_dict(self.csr_matrices) new_csr_matrices[src_key][dst_key] =\ self.csr_matrices[src_key][dst_key].remove_edges_by_ind(node_pair_indices) new_csr_matrices[dst_key][src_key] = \ self.csr_matrices[dst_key][src_key].remove_edges_by_ind(np.flipud(node_pair_indices)) return HeterGraph(features=self.features, node_ids=self.node_ids, meta_graph=self.meta_graph, _csr_matrices=new_csr_matrices) def sel_subgraph_by_id(self, key, node_ids): """ Select the given nodes from the heterogenous graph and return a new graph Parameters ---------- key : str node_ids : np.ndarray Returns ------- ret : HeterGraph """ new_features = self.features.copy() new_node_ids = self.node_ids.copy() new_csr_matrices = _shallow_copy_stacked_dict(self.csr_matrices) new_features[key] = np.take(self.features[key], self.node_id_to_ind(key, node_ids), axis=0) new_node_ids[key] = node_ids for dst_key, csr_mat in self.csr_matrices[key].items(): if dst_key != key: new_csr_matrices[key][dst_key] = csr_mat.submat_by_id(row_ids=node_ids, col_ids=None) new_csr_matrices[dst_key][key] = \ self.csr_matrices[dst_key][key].submat_by_id(row_ids=None, col_ids=node_ids) else: new_csr_matrices[key][dst_key] = csr_mat.submat_by_id(row_ids=node_ids, col_ids=node_ids) return HeterGraph(features=new_features, node_ids=new_node_ids, meta_graph=self.meta_graph, _csr_matrices=new_csr_matrices) def gen_nd_features(self, ctx): """Copy the features to the given mxnet context Parameters ---------- ctx : mx.Context Returns ------- nd_features : dict """ return {key: NodeFeatures(mx.nd.array(self.features[key], ctx=ctx, dtype=np.float32), self.node_ids[key]) for key in self.features} def check_consistency(self): _checked_edges = set() for src_key in self.meta_graph: for dst_key in self.meta_graph[src_key]: if (src_key, dst_key) in _checked_edges: continue _checked_edges.add((src_key, dst_key)) _checked_edges.add((dst_key, src_key)) src_to_dst = self.csr_matrices[src_key][dst_key] dst_to_src = self.csr_matrices[dst_key][src_key] assert src_to_dst.shape\ == (self.features[src_key].shape[0], self.features[dst_key].shape[0])\ == (dst_to_src.shape[1], dst_to_src.shape[0]) npt.assert_allclose(src_to_dst.fetch_edges_by_id(np.flipud(dst_to_src.node_pair_ids)), dst_to_src.values) npt.assert_allclose(dst_to_src.fetch_edges_by_id(np.flipud(src_to_dst.node_pair_ids)), src_to_dst.values) src_to_dst.check_consistency() dst_to_src.check_consistency() @staticmethod def load(dir_name, fea_normalize=False): with open(os.path.join(dir_name, 'meta_graph.json')) as f: meta_graph = json.load(f) features = {} node_ids = {} csr_mat_dict = {} cached_edge = set() for k1 in meta_graph: dat = np.load(os.path.join(dir_name, '{}.npz'.format(k1))) fea = dat['features'][:] if fea_normalize and fea is not None and fea.ndim == 2: from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(fea) fea = scaler.transform(fea) features[k1] = fea node_ids[k1] = dat['node_ids'][:] for k2 in meta_graph[k1]: if (k1, k2) in cached_edge: continue cached_edge.add((k1, k2)) cached_edge.add((k2, k1)) found = False for tk1, tk2 in [(k1, k2), (k2, k1)]: fname = os.path.join(dir_name, '{}_{}_csr.npz'.format(tk1, tk2)) if os.path.exists(fname): assert not found csr_mat = CSRMat.load(fname) csr_mat_dict[(tk1, tk2)] = csr_mat found = True assert found, "k1={}, k2={} not found!".format(k1, k2) return HeterGraph(features=features, node_ids=node_ids, csr_mat_dict=csr_mat_dict) def __getitem__(self, pair_keys): """ Parameters ---------- pair_keys : 
list-like object The src_key, dst_key pair Returns ------- ret : CSRMat The resulting bipartite graph """ assert len(pair_keys) == 2 return self.csr_matrices[pair_keys[0]][pair_keys[1]] def __repr__(self): stream = io.StringIO() print(" --------------------------", file=stream) meta_graph_npy = np.zeros(shape=(len(self.meta_graph), len(self.meta_graph)), dtype=np.int32) node_keys = self.meta_graph.keys() for key in node_keys: print('{}, num={}, feature dim={}'.format(key, self.features[key].shape[0], self.features[key].shape[1]), file=stream) node_key_map = {ele: i for i, ele in enumerate(node_keys)} for k1 in self.meta_graph: for k2 in self.meta_graph[k1]: meta_graph_npy[node_key_map[k1]][node_key_map[k2]] = self.csr_matrices[k1][k2].nnz mgraph_df = pd.DataFrame(meta_graph_npy, index=node_keys, columns=node_keys) print('meta-graph=', file=stream) print(mgraph_df, file=stream) print('multi-link=', file=stream) for k1 in self.meta_graph: for k2 in self.meta_graph[k1]: if self.csr_matrices[k1][k2].multi_link is not None: print('{} --> {}: {}'.format(k1, k2, self.csr_matrices[k1][k2].multi_link), file=stream) return stream.getvalue() def summary(self): print(self) ``` #### File: mxnet/gc-mc/model.py ```python import mxnet.ndarray as F import numpy as np import warnings from mxnet.gluon import nn, HybridBlock, Block from utils import get_activation import mxnet as mx import dgl.function as fn class MultiLinkGCNAggregator(Block): def __init__(self, src_key, dst_key, units, src_in_units, dst_in_units, num_links, dropout_rate=0.0, accum='stack', act=None, **kwargs): super(MultiLinkGCNAggregator, self).__init__(**kwargs) self._src_key = src_key self._dst_key = dst_key self._accum = accum self._num_links = num_links self._units = units if accum == "stack": assert units % num_links == 0, 'units should be divisible by the num_links ' self._units = self._units // num_links with self.name_scope(): self.dropout = nn.Dropout(dropout_rate) ### dropout before feeding the out layer self.act = get_activation(act) self.src_dst_weights = self.params.get('src_dst_weight', shape=(num_links, self._units, src_in_units), dtype=np.float32, allow_deferred_init=True) self.dst_src_weights = self.params.get('dst_dst_weight', shape=(num_links, self._units, dst_in_units), dtype=np.float32, allow_deferred_init=True) def forward(self, g): def src_node_update(nodes): Ndata = {} for i in range(self._num_links): w = self.src_dst_weights.data()[i] ## agg_units * #nodes Ndata['fea{}'.format(i)] = mx.nd.dot(self.dropout(nodes.data['fea']), w, transpose_b=True) return Ndata def dst_node_update(nodes): Ndata = {} for i in range(self._num_links): w = self.dst_src_weights.data()[i] ## agg_units * #nodes Ndata['fea{}'.format(i)] = mx.nd.dot(self.dropout(nodes.data['fea']), w, transpose_b=True) return Ndata g[self._src_key].apply_nodes(src_node_update) g[self._dst_key].apply_nodes(dst_node_update) def accum_node_func(nodes): accums = [] for i in range(self._num_links): accums.append(nodes.data['accum{}'.format(i)]) if self._accum == "sum": accum = mx.nd.add_n(*accums) elif self._accum == "stack": accum = mx.nd.concat(*accums, dim=1) else: raise NotImplementedError return {'h': self.act(accum)} src_dst_g = g[self._src_key, self._dst_key, 'rating'] dst_src_g = g[self._dst_key, self._src_key, 'rating'] for i in range(self._num_links): src_dst_g.send_and_recv(src_dst_g.edges(), ### here we can filter edges fn.src_mul_edge('fea{}'.format(i), 'support{}'.format(i), 'msg{}'.format(i)), fn.sum('msg{}'.format(i), 'accum{}'.format(i)), None) 
src_dst_g[self._dst_key].apply_nodes(accum_node_func) for i in range(self._num_links): dst_src_g.send_and_recv(dst_src_g.edges(), fn.src_mul_edge('fea{}'.format(i), 'support{}'.format(i), 'msg{}'.format(i)), fn.sum('msg{}'.format(i), 'accum{}'.format(i)), None) dst_src_g[self._src_key].apply_nodes(accum_node_func) dst_h = src_dst_g[self._dst_key].ndata.pop('h') src_h = dst_src_g[self._src_key].ndata.pop('h') return src_h, dst_h class GCMCLayer(Block): def __init__(self, src_key, dst_key, src_in_units, dst_in_units, agg_units, out_units, num_links, dropout_rate=0.0, agg_accum='stack', agg_act=None, out_act=None, # agg_ordinal_sharing=False, share_agg_weights=False, share_out_fc_weights=False, **kwargs): super(GCMCLayer, self).__init__(**kwargs) self._out_act = get_activation(out_act) self._src_key = src_key self._dst_key = dst_key with self.name_scope(): self.dropout = nn.Dropout(dropout_rate) self.aggregator = MultiLinkGCNAggregator(src_key=src_key, dst_key=dst_key, units = agg_units, src_in_units=src_in_units, dst_in_units=dst_in_units, num_links=num_links, dropout_rate=dropout_rate, accum=agg_accum, act=agg_act, prefix='agg_') self.user_out_fcs = nn.Dense(out_units, flatten=False, prefix='user_out_') self.item_out_fcs = nn.Dense(out_units, flatten=False, prefix='item_out_') self._out_act = get_activation(out_act) def forward(self, graph): user_h, item_h = self.aggregator(graph) out_user = self._out_act(self.user_out_fcs(user_h)) out_item = self._out_act(self.item_out_fcs(item_h)) return out_user, out_item class BiDecoder(HybridBlock): def __init__(self, in_units, out_units, num_basis_functions=2, prefix=None, params=None): super(BiDecoder, self).__init__(prefix=prefix, params=params) self._num_basis_functions = num_basis_functions with self.name_scope(): for i in range(num_basis_functions): self.__setattr__('weight{}'.format(i), self.params.get('weight{}'.format(i), shape=(in_units, in_units), init=mx.initializer.Orthogonal(scale=1.1, rand_type='normal'), allow_deferred_init=True)) self.rate_out = nn.Dense(units=out_units, flatten=False, use_bias=False, prefix="rate_") def hybrid_forward(self, F, data1, data2, **kwargs): basis_outputs_l = [] for i in range(self._num_basis_functions): basis_out = F.sum(F.dot(data1, kwargs["weight{}".format(i)]) * data2, axis=1, keepdims=True) basis_outputs_l.append(basis_out) basis_outputs = F.concat(*basis_outputs_l, dim=1) out = self.rate_out(basis_outputs) return out class InnerProductLayer(HybridBlock): def __init__(self, mid_units=None, **kwargs): super(InnerProductLayer, self).__init__(**kwargs) self._mid_units = mid_units if self._mid_units is not None: self._mid_map = nn.Dense(mid_units, flatten=False) def hybrid_forward(self, F, data1, data2): if self._mid_units is not None: data1 = self._mid_map(data1) data2 = self._mid_map(data2) score = F.sum(data1 * data2, axis=1, keepdims=True) return score ```
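A quick way to sanity-check the bilinear decoder above is to run it on random embeddings. This is only a smoke-test sketch: it assumes MXNet is available and that the file above is importable as `model` (both are assumptions, not part of the original repo).

```python
# Hypothetical smoke test for BiDecoder; module name, shapes and values are assumptions.
import mxnet as mx
from model import BiDecoder  # assumes the file above is saved as model.py on the path

decoder = BiDecoder(in_units=4, out_units=5, num_basis_functions=2)
decoder.initialize()

user_emb = mx.nd.random.normal(shape=(3, 4))   # 3 users, 4-dim embeddings
item_emb = mx.nd.random.normal(shape=(3, 4))   # 3 items, 4-dim embeddings
scores = decoder(user_emb, item_emb)           # one logit per rating level -> shape (3, 5)
print(scores.shape)
```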
{ "source": "jennyzhang0215/DRKG", "score": 3 }
#### File: DRKG/utils/utils.py ```python import os import tarfile def download_and_extract(): import shutil import requests url = "https://s3.us-west-2.amazonaws.com/dgl-data/dataset/DRKG/drkg.tar.gz" path = "../data/" filename = "drkg.tar.gz" fn = os.path.join(path, filename) if os.path.exists("../data/drkg/drkg.tsv"): return opener, mode = tarfile.open, 'r:gz' os.makedirs(path, exist_ok=True) cwd = os.getcwd() os.chdir(path) while True: try: file = opener(filename, mode) try: file.extractall() finally: file.close() break except Exception: f_remote = requests.get(url, stream=True) sz = f_remote.headers.get('content-length') assert f_remote.status_code == 200, 'fail to open {}'.format(url) with open(filename, 'wb') as writer: for chunk in f_remote.iter_content(chunk_size=1024*1024): writer.write(chunk) print('Download finished. Unzipping the file...') os.chdir(cwd) ```
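For context, a typical call site just triggers the download and then reads the extracted TSV. The column names below are an assumption (the usual head/relation/tail triplet layout), not something defined in the snippet above.

```python
# Hypothetical usage of download_and_extract(); import path and column names are assumptions.
import pandas as pd
from utils.utils import download_and_extract  # adjust to where the module lives locally

download_and_extract()  # returns immediately if ../data/drkg/drkg.tsv already exists
triplets = pd.read_csv("../data/drkg/drkg.tsv", sep="\t", header=None,
                       names=["head", "relation", "tail"])
print(triplets.head())
```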
{ "source": "jennyzhang0215/Neural-LP", "score": 3 }
#### File: KGC/data/convert_dict2list.py ```python import numpy as np import os def gen_dict(file_name): d_dict = {} f = open(file_name, "r") for line in f: vec = line.strip('\n').split("\t") d_dict[vec[1]] = int(vec[0]) return d_dict data_name = 'fb15k-237' test_file = os.path.join(data_name, 'test.txt') rel_file = os.path.join(data_name, 'relations.tsv') entity_file = os.path.join(data_name, 'entities.tsv') rel_dict = gen_dict(rel_file) entity_dict = gen_dict(entity_file) # print("rel_dict", rel_dict) # print("entity_dict", entity_dict) tests = np.loadtxt(test_file, delimiter='\t', dtype=str) #heads = list(map(entity_dict.get, tests[:1, 0])) rels = list(map(rel_dict.get, tests[:1, 1])) tails = list(map(entity_dict.get, tests[:1, 2])) #np.savetxt(os.path.join(data_name, 'head.list'), heads, fmt='%s') np.savetxt(os.path.join(data_name, 'rel.list'), rels, fmt='%s') np.savetxt(os.path.join(data_name, 'tail.list'), tails, fmt='%s') ```
{ "source": "jenojp/negspacy", "score": 2 }
#### File: negspacy/negspacy/test_scispacy_dep.py ```python import pytest import spacy import copy import negation from termsets import termset from spacy.pipeline import EntityRuler def build_med_docs(): docs = list() docs.append( ( "Patient denies cardiovascular disease but has headaches. No history of smoking. Alcoholism unlikely. Smoking not ruled out.", [ ("Patient denies", False), ("cardiovascular disease", True), ("smoking", True), ("Alcoholism", True), ("unlikely", False), ("Smoking", False), ], ) ) docs.append( ( "No history of headaches, prbc, smoking, acid reflux, or GERD.", [ ("No history", True), ("headaches", True), ("prbc", True), ("smoking", True), ("acid reflux", True), ("GERD", True), ], ) ) docs.append( ( "Alcoholism was not the cause of liver disease.", [("Alcoholism", True), ("liver disease", False)], ) ) return docs def test_umls(): nlp = spacy.load("en_core_sci_sm") ts = termset("en_clinical") nlp.add_pipe( "negex", config={ "neg_termset": ts.get_patterns(), "ent_types": ["ENTITY"], "chunk_prefix": ["no"], }, last=True, ) # negex = Negex( # nlp, language="en_clinical", ent_types=["ENTITY"], chunk_prefix=["no"] # ) # nlp.add_pipe("negex", last=True) docs = build_med_docs() for d in docs: doc = nlp(d[0]) print(doc.ents) for i, e in enumerate(doc.ents): print(e.text, e._.negex) assert (e.text, e._.negex) == d[1][i] def __test_umls2(): nlp = spacy.load("en_core_sci_sm") # negex = Negex( # nlp, language="en_clinical_sensitive", ent_types=["ENTITY"], chunk_prefix=["no"] # ) # nlp.add_pipe("negex", last=True) ts = termset("en_clinical") nlp.add_pipe( "negex", config={ "neg_termset": ts.get_patterns(), "ent_types": ["ENTITY"], "chunk_prefix": ["no"], }, last=True, ) docs = build_med_docs() for d in docs: doc = nlp(d[0]) for i, e in enumerate(doc.ents): print(e.text, e._.negex) assert (e.text, e._.negex) == d[1][i] def test_issue_14(): nlp = spacy.load("en_core_sci_sm") # negex = Negex(nlp, language="en_clinical", chunk_prefix=["no", "cancer free"]) # negex.remove_patterns(following_negations="free") ts = termset("en_clinical") ts.remove_patterns({"following_negations": ["free"]}) print(ts.get_patterns()) nlp.add_pipe( "negex", config={ "neg_termset": ts.get_patterns(), "chunk_prefix": ["no", "cancer free"], }, last=True, ) doc = nlp("The patient has a cancer free diagnosis") expected = [False, True] for i, e in enumerate(doc.ents): print(e.text, e._.negex) assert e._.negex == expected[i] nlp.remove_pipe("negex") # negex = Negex(nlp, language="en_clinical", chunk_prefix=["no", "free"]) # nlp.add_pipe("negex", last=True) ts = termset("en_clinical") nlp.add_pipe( "negex", config={ "neg_termset": ts.get_patterns(), "chunk_prefix": ["no", "free"], }, last=True, ) doc = nlp("The patient has a cancer free diagnosis") expected = [False, False] for i, e in enumerate(doc.ents): print(e.text, e._.negex) assert e._.negex == expected[i] if __name__ == "__main__": test_umls() test_umls2() test_issue_14() ```
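The same wiring used in these tests works outside scispaCy as well. A minimal sketch, assuming the general-purpose en_core_web_sm model and the general "en" termset are installed (neither is required by the tests above):

```python
# Minimal negspacy example mirroring the pipeline setup used in the tests above.
import spacy
from negspacy.negation import Negex  # importing registers the "negex" pipeline factory
from negspacy.termsets import termset

nlp = spacy.load("en_core_web_sm")  # assumed to be installed
ts = termset("en")
nlp.add_pipe("negex", config={"neg_termset": ts.get_patterns()}, last=True)

doc = nlp("She does not like Steve Jobs but likes Apple products.")
for ent in doc.ents:
    print(ent.text, ent._.negex)  # True means the entity was negated
```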
{ "source": "jenoOvchi/evidently", "score": 2 }
#### File: model_profile/sections/cat_target_drift_profile_section.py ```python from datetime import datetime from typing import Any from typing import Dict from typing import Iterable from typing import Optional from typing import Type from evidently.analyzers.base_analyzer import Analyzer from evidently.analyzers.cat_target_drift_analyzer import CatTargetDriftAnalyzer from evidently.model_profile.sections.base_profile_section import ProfileSection class CatTargetDriftProfileSection(ProfileSection): def part_id(self) -> str: return 'cat_target_drift' def __init__(self) -> None: super().__init__() self.analyzers_types = [CatTargetDriftAnalyzer] self._result = None def analyzers(self) -> Iterable[Type[Analyzer]]: return self.analyzers_types def calculate(self, reference_data, current_data, column_mapping, analyzers_results) -> None: result = CatTargetDriftAnalyzer.get_results(analyzers_results) result_json: Dict[str, Any] = result.columns.as_dict() result_json['metrics'] = {} if result.target_metrics: result_json['metrics']['target_name'] = result.target_metrics.column_name result_json['metrics']['target_type'] = 'cat' result_json['metrics']['target_drift'] = result.target_metrics.drift if result.prediction_metrics: result_json['metrics']['prediction_name'] = result.prediction_metrics.column_name result_json['metrics']['prediction_type'] = 'cat' result_json['metrics']['prediction_drift'] = result.prediction_metrics.drift self._result = { 'name': self.part_id(), 'datetime': str(datetime.now()), 'data': result_json } def get_results(self) -> Optional[dict]: return self._result ``` #### File: model_profile/sections/num_target_drift_profile_section.py ```python from datetime import datetime from evidently.analyzers.num_target_drift_analyzer import NumTargetDriftAnalyzer from evidently.model_profile.sections.base_profile_section import ProfileSection class NumTargetDriftProfileSection(ProfileSection): def part_id(self) -> str: return 'num_target_drift' def __init__(self): super().__init__() self.analyzers_types = [NumTargetDriftAnalyzer] self._result = None def analyzers(self): return self.analyzers_types def calculate(self, reference_data, current_data, column_mapping, analyzers_results): result = NumTargetDriftAnalyzer.get_results(analyzers_results) result_json = result.columns.as_dict() result_json['metrics'] = {} if result.target_metrics: result_json['metrics']['target_name'] = result.target_metrics.column_name result_json['metrics']['target_type'] = 'num' result_json['metrics']['target_drift'] = result.target_metrics.drift result_json['metrics']['target_correlations'] = { 'current': result.target_metrics.current_correlations, 'reference': result.target_metrics.reference_correlations, } if result.prediction_metrics: result_json['metrics']['prediction_name'] = result.prediction_metrics.column_name result_json['metrics']['prediction_type'] = 'num' result_json['metrics']['prediction_drift'] = result.prediction_metrics.drift result_json['metrics']['prediction_correlations'] = { 'current': result.prediction_metrics.current_correlations, 'reference': result.prediction_metrics.reference_correlations, } self._result = { 'name': self.part_id(), 'datetime': str(datetime.now()), 'data': result_json } def get_results(self): return self._result ``` #### File: model_profile/sections/test_data_drift_profile_section.py ```python import pandas from evidently import ColumnMapping from evidently.model_profile.sections.data_drift_profile_section import DataDriftProfileSection from .helpers import 
calculate_section_results from .helpers import check_profile_section_result_common_part from .helpers import check_section_without_calculation_results def _check_feature_metrics(feature_metric: dict, feature_type: str): assert 'current_small_hist' in feature_metric assert 'feature_type' in feature_metric assert feature_metric['feature_type'] == feature_type assert 'p_value' in feature_metric assert 'ref_small_hist' in feature_metric def test_no_calculation_results() -> None: check_section_without_calculation_results(DataDriftProfileSection, 'data_drift') def test_data_drift_profile_section_with_calculated_results(): current_data = pandas.DataFrame({ 'target': [1, 2, 3, 4], 'numerical_feature': [0.5, 0.0, 4.8, 2.1], 'categorical_feature': [1, 1, 0, 1], }) reference_data = current_data[:2] data_columns = ColumnMapping( numerical_features=['numerical_feature'], categorical_features=['categorical_feature'], target_names=['drift_target_result'] ) data_drift_profile_section_result = calculate_section_results(DataDriftProfileSection, reference_data, current_data, data_columns) data_columns = ColumnMapping( numerical_features=['numerical_feature'], categorical_features=['categorical_feature'], target_names=['drift_target_result'] ) check_profile_section_result_common_part(data_drift_profile_section_result, 'data_drift') result_data = data_drift_profile_section_result['data'] assert 'options' in result_data # check metrics assert 'metrics' in result_data assert 'dataset_drift' in result_data['metrics'] assert 'n_drifted_features' in result_data['metrics'] assert 'n_features' in result_data['metrics'] assert result_data['metrics']['n_features'] == 2 assert 'numerical_feature' in result_data['metrics'] _check_feature_metrics(result_data['metrics']['numerical_feature'], 'num') assert 'categorical_feature' in result_data['metrics'] _check_feature_metrics(result_data['metrics']['categorical_feature'], 'cat') ``` #### File: model_profile/sections/test_regression_performance_profile_section.py ```python from typing import Any from typing import Dict import pandas import pytest from evidently.model_profile.sections.regression_performance_profile_section import \ RegressionPerformanceProfileSection from .helpers import calculate_section_results from .helpers import check_profile_section_result_common_part from .helpers import check_section_without_calculation_results def check_regression_performance_metrics_dict(metrics: Dict[str, Any]) -> None: assert 'abs_error_std' in metrics assert 'abs_perc_error_std' in metrics assert 'error_normality' in metrics assert 'error_std' in metrics assert 'mean_abs_error' in metrics assert 'mean_abs_perc_error' in metrics assert 'mean_error' in metrics assert 'underperformance' in metrics def test_no_calculation_results() -> None: check_section_without_calculation_results(RegressionPerformanceProfileSection, 'regression_performance') @pytest.mark.parametrize( 'reference_data,current_data', ( (pandas.DataFrame({'target': [3, 5, 3, 7], 'prediction': [1, 2, 7, 4]}), None), ( pandas.DataFrame({'target': [1, 9, 3, 4], 'prediction': [1, 4, 1, 4]}), pandas.DataFrame({'target': [1, 1, 1, 3], 'prediction': [1, 2, 1, 0]}), ), ) ) def test_profile_section_with_calculated_results(reference_data, current_data) -> None: section_result = calculate_section_results( RegressionPerformanceProfileSection, reference_data, current_data ) check_profile_section_result_common_part(section_result, 'regression_performance') result_data = section_result['data'] # check metrics structure and types, 
ignore concrete metrics values assert 'metrics' in result_data metrics = result_data['metrics'] assert 'error_bias' in metrics assert 'reference' in metrics reference_metrics = metrics['reference'] check_regression_performance_metrics_dict(reference_metrics) if current_data is not None: assert 'current' in metrics current_metrics = metrics['current'] check_regression_performance_metrics_dict(current_metrics) ```
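In normal use these sections are not driven by the test helpers; they are handed to a Profile object. A rough sketch of that flow, assuming the Profile wrapper shipped with the same evidently release:

```python
# Hypothetical end-to-end use of a profile section; data values are arbitrary.
import pandas as pd
from evidently import ColumnMapping
from evidently.model_profile import Profile
from evidently.model_profile.sections import CatTargetDriftProfileSection

reference = pd.DataFrame({"target": [0, 1, 0, 1], "prediction": [0, 1, 1, 1]})
current = pd.DataFrame({"target": [1, 1, 0, 1], "prediction": [1, 0, 0, 1]})

profile = Profile(sections=[CatTargetDriftProfileSection()])
profile.calculate(reference, current, column_mapping=ColumnMapping())
print(profile.json())  # serialized metrics, including target/prediction drift
```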
{ "source": "jenosavel/tinytest", "score": 3 }
#### File: jenosavel/tinytest/assertion.py ```python import collections def find(description): return Description(description) def verify(actual): assert isinstance(actual, Actual), 'Expected {0} ({1}). Actual {0} ({2})'.format(actual, Actual, type(actual)) return VerifyObject(actual) class Description(object): def __init__(self, description): self.__description = description def using(self, value): return Actual(self.__description, value) class Actual(object): def __init__(self, description, value): self.__description = description self.__value = value @property def description(self): return self.__description @property def value(self): return self.__value class VerifyObject(object): def __init__(self, actual): self.__actual = actual def __reportCompareString(self, expected, actual, reverse = False, showType = False): return '{0}: Expected{1} {2}{3}. Actual {4}{5}.'.format( self.__actual.description, ' not' if reverse else '', expected, ' ({0})'.format(type(expected)) if showType else '', actual, ' ({0})'.format(type(actual)) if showType else '' ) def __reportTypeString(self, expected, actual, reverse = False): return '{0}: {1}: Expected{2} {3}. Actual {4}.'.format( self.__actual.description, self.__actual.value, ' not' if reverse else '', type(expected), type(actual) ) def __reportInstanceString(self, expected, actual, reverse = False): return '{0}: {1}: Expected{2} {3}. Actual {4}.'.format( self.__actual.description, self.__actual.value, ' not' if reverse else '', expected.__class__, actual.value.__class__ ) def callWith(self, *args, **kwargs): return VerifyCallable(self.__actual, *args, **kwargs) @property def collection(self): return VerifyCollection(self.__actual) def isTrue(self): assert self.__actual.value is True, self.__reportCompareString(True, self.__actual.value) return True def isFalse(self): assert self.__actual.value is False, self.__reportCompareString(False, self.__actual.value) return True def isNone(self): assert self.__actual.value is None, self.__reportCompareString(None, self.__actual.value) return True def isNotNone(self): assert self.__actual.value is not None, self.__reportCompareString(None, self.__actual.value, reverse = True) return True def isEqualTo(self, expected): assert self.__actual.value == expected, self.__reportCompareString(expected, self.__actual.value, showType = True) return True def isNotEqualTo(self, expected): assert self.__actual.value != expected, self.__reportCompareString(expected, self.__actual.value, reverse = True, showType = True) return True def isOfType(self, expected): assert type(self.__actual.value) == expected, self.__reportTypeString(expected, self.__actual.value) return True def isNotOfType(self, expected): assert type(self.__actual.value) != expected, self.__reportTypeString(expected, self.__actual.value, reverse = True) return True def isInstanceOf(self, expected): assert isinstance(self.__actual.value, expected), self.__reportInstanceString(expected, self.__actual.value) return True def isNotInstanceOf(self, expected): assert not isinstance(self.__actual.value, expected), self.__reportInstanceString(expected, self.__actual.value, reverse = True) return True class VerifyCallable(object): def __init__(self, actual, *args, **kwargs): self.__actual = actual self.__args = args self.__kwargs = kwargs def shouldThrow(self, expected): assert issubclass(expected, Exception), '{0} ({1}) is not an exception.'.format(expected, type(expected)) try: self.__actual.value(*self.__args, **self.__kwargs) except expected: return True raise 
AssertionError('{0}: {1}({2}{3}{4}) did not throw {5}.'.format(
            self.__actual.description,
            self.__actual.value.__name__,
            ', '.join(self.__args),
            ', ' if self.__args and self.__kwargs else '',
            ', '.join(['{0}={1}'.format(name, value) for name, value in self.__kwargs.items()]),
            expected.__name__
        ))

    def shouldNotThrow(self):
        try:
            self.__actual.value(*self.__args, **self.__kwargs)
        except Exception as error:
            raise type(error)('{0}: {1}'.format(self.__actual.description, error.message))
        return True

class VerifyCollection(object):

    def __init__(self, actual):
        assert isinstance(actual.value, collections.Sequence), '{0} ({1}) is not a sequence.'.format(actual, type(actual))
        self.__actual = actual

    def contains(self, expected):
        # Membership is checked with the `in` operator; Python sequences have no .contains() method.
        assert expected in self.__actual.value, '{0}: Expected {1} in {2}.'.format(self.__actual.description, expected, self.__actual.value)
        return True

    def notContains(self, expected):
        assert expected not in self.__actual.value, '{0}: Expected {1} not in {2}.'.format(self.__actual.description, expected, self.__actual.value)
        return True

    def isEmpty(self):
        assert not self.__actual.value, '{0}: Expected empty. Actual {1}.'.format(self.__actual.description, self.__actual.value)
        return True

    def isNotEmpty(self):
        assert self.__actual.value, '{0}: Expected not empty. Actual {1}.'.format(self.__actual.description, self.__actual.value)
        return True

    def isEqualTo(self, expected):
        assert isinstance(expected, collections.Sequence), 'Expected value must be a sequence.'
        assert tuple(self.__actual.value) == tuple(expected), '{0}: Expected {1}. Actual {2}.'.format(self.__actual.description, expected, self.__actual.value)

    def size(self, expected):
        actual = sum(1 for _ in self.__actual.value)
        assert actual == expected, '{0}: Expected size {1}. Actual {2}'.format(self.__actual.description, expected, actual)
```
#### File: jenosavel/tinytest/example.py
```python
from tinytest.test import *

@describe('My one thing')
class MyTest(TestClass):

    @setup
    def someKindOfSetup(self):
        self.log.debug('In setup')

    @cleanup
    def cleanupOrSomething(self):
        self.log.debug('In cleanup')

    @should('do a thing')
    def aMethod(self):
        self.log.debug('Running a successful test here...')

    @should('do that other thing')
    def nameDoesntMatter(self):
        assert False, 'I always fail, just because!'

    @should('show off assertions')
    def assertionsRCool(self):
        thingToTest = 1
        actual = find('Unexpected int value').using(thingToTest)
        verify(actual).isEqualTo(2)

@describe('Another thing')
class BadTest(TestClass):

    @setup
    def aMethod(self):
        assert False, 'Setup blocker!!'

    @cleanup
    def someotherthing(self):
        self.log.debug('In cleanup')

    @should('do a thing')
    def someThings(self):
        self.log.debug('Testing some things')

    @skip
    @should('maybe sometimes do a thing')
    def thingsImLazyAbout(self):
        self.log.debug('Skipping some tests here, like a jerk')

if __name__ == '__main__':
    from tinytest import TinyTester
    TinyTester(level='DEBUG').run([MyTest, BadTest])
```
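The assertion helpers compose the same way for callables and sequences. A small sketch built only from the classes above, imported the same way example.py does (the values are arbitrary):

```python
# Hypothetical extra usage of the assertion helpers shown above.
from tinytest.test import *

numbers = find('A short list').using([1, 2, 3])
verify(numbers).collection.size(3)
verify(numbers).collection.isEqualTo((1, 2, 3))

divide = find('Integer division').using(lambda a, b: a // b)
verify(divide).callWith(1, 0).shouldThrow(ZeroDivisionError)
```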
{ "source": "jenrei/pytemperaturectrl", "score": 2 }
#### File: pytemperaturectrl/pytemperaturectrl/julabo.py
```python
import serial
import time

from .pytemperaturectrl import TemperatureControl

class Julabo(TemperatureControl):
    """Julabo Temperature control implementation"""

    # see Julabo doc
    MIN_TIME_INTERVAL = 0.250

    def __init__(self, *args, **kwargs):
        super(TemperatureControl, self).__init__()
        self.serial = None

    def checkIfOpen(self):
        """ Check if serial port is open """
        if self.serial is None:
            raise Exception("Please call open function before all communication")

    def open(self, com_port, baudrate=4800):
        """ Open serial communication"""
        self.serial = serial.Serial(com_port, baudrate=baudrate, bytesize=serial.SEVENBITS,
                                    parity=serial.PARITY_EVEN, stopbits=serial.STOPBITS_ONE,
                                    timeout=1, xonxoff=False, rtscts=True, dsrdtr=False)

    def close(self):
        """ Close serial communication"""
        self.checkIfOpen()
        if self.serial is not None:
            self.serial.close()

    def power(self, on):
        """set power to on or off"""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        value = 1 if on else 0
        # Interpolate the value before encoding; a bytes literal wrapped around an
        # f-string would send the braces verbatim instead of the actual value.
        self.serial.write("out_mode_05 {}\r\n".format(value).encode())

    def getVersion(self):
        """retrieve engine version"""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'version\r\n')
        return self.serial.readline()

    def getStatus(self):
        """retrieve engine status"""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'status\r\n')
        return self.serial.readline()

    def setWorkTemperature(self, temperature_in_degree):
        """set setpoint temperature"""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        # Same fix as power(): build the command as text, then encode it for the port.
        self.serial.write("out_sp_00 {}\r\n".format(temperature_in_degree).encode())

    def getWorkTemperature(self):
        """get setpoint temperature"""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'in_sp_00\r\n')
        return float(self.serial.readline())

    def getCurrentTemperature(self):
        """get current tank temperature"""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'in_pv_00\r\n')
        return float(self.serial.readline())
```
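A typical session with the driver above opens the port, pushes a setpoint, and polls the bath. The port name below is a placeholder for whatever the instrument enumerates as locally.

```python
# Hypothetical usage of the Julabo driver defined above; "COM3" is a placeholder port.
from pytemperaturectrl.julabo import Julabo

bath = Julabo()
bath.open("COM3")              # e.g. "/dev/ttyUSB0" on Linux
print(bath.getVersion())
bath.setWorkTemperature(37.0)
bath.power(True)
print(bath.getCurrentTemperature())
bath.power(False)
bath.close()
```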
{ "source": "jenrenema/Hack-Planet", "score": 3 }
#### File: Hack-Planet/PythonFiles/move_test.py
```python
import uagame
import pygame
import time
from pygame.locals import *

def main():
    # Create window
    width = 500
    height = 500
    window = uagame.Window('Move Test', width, height)
    window.set_auto_update(False)

    # Create game
    game = Game(window)

    # Play game
    game.play_game()

    # Close Window
    window.close()

class Game:
    # An object in this class represents a complete game

    def __init__(self, window):
        # Initialize a Game
        # - self is the Game to initialize
        # - window is a uagame.Window object
        self.window = window
        self.pause_time = 0.005
        self.close_clicked = False
        self.continue_game = True
        self.key_pressed = [False, False, False, False]

    def play_game(self):
        # Play the game until the player presses the close button
        # - self is the Game
        while not self.close_clicked:
            self.handle_event()
            self.draw_frame()
            if self.continue_game:
                self.update_game_objects()
                self.should_continue()
            time.sleep(self.pause_time)

    def handle_event(self):
        # Handle each user event
        # - self is the Game
        event = pygame.event.poll()
        if event.type == QUIT:
            self.close_clicked = True
        correct_keys = [K_a, K_s, K_w, K_d]
        list_of_keys = pygame.key.get_pressed()
        for index in range(len(correct_keys)):
            if list_of_keys[correct_keys[index]]:
                self.key_pressed[index] = True
            else:
                self.key_pressed[index] = False

    def update_game_objects(self):
        # Update the game objects
        # - self is the Game
        # Nothing moves yet; a statement is still required so the method compiles.
        pass

    def draw_frame(self):
        # Draw all the game objects
        # - self is the Game
        self.window.clear()
        pygame.draw.circle(self.window.get_surface(), pygame.Color("White"),
                           [int(self.window.get_width()/2), int(self.window.get_height()/2)], 2)
        self.window.update()

    def should_continue(self):
        # Check if the game should continue
        # - self is the Game
        self.continue_game = True

class Ball:
    # An object in this class represents a circle

    def __init__(self, window, color):
        # Initialize a Ball
        # - self is the Ball to initialize
        # - window is a uagame.Window object
        # - color is a pygame.Color object representing the color of the ball
        self.radius = 7
        self.color = color
        self.center = [int(window.get_width()/2), int(window.get_height()/2)]
        self.velocity = [5, 1]
        self.window = window

    def draw(self):
        # Draw the Ball
        # - self is the Ball
        pygame.draw.circle(self.window.get_surface(), self.color, self.center, self.radius)

    def move(self):
        # Move the Ball
        # - self is the Ball
        for index in range(2):
            self.center[index] += self.velocity[index]

main()
```
{ "source": "jenrik/archiver", "score": 2 }
#### File: archiver/archiver/utils.py ```python def archive_link(link): # Create a snapshot of the link on the internet archive # ToDo # Ref: https://archive.readme.io/docs/creating-a-snapshot pass ```
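One possible way to fill in the stub above is to hit the Wayback Machine's save endpoint directly. This is only a sketch of that approach, not the project's chosen implementation; the response header used below is an assumption.

```python
# Hypothetical implementation sketch; endpoint behaviour and error handling are assumptions.
import requests

def archive_link(link):
    """Ask the Internet Archive to snapshot `link`; return the capture URL if reported."""
    response = requests.get("https://web.archive.org/save/" + link, timeout=60)
    response.raise_for_status()
    # The Wayback Machine commonly reports the capture path in the Content-Location header.
    capture = response.headers.get("Content-Location")
    return "https://web.archive.org" + capture if capture else None
```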
{ "source": "JenRodge/manual", "score": 3 }
#### File: JenRodge/manual/build-manual.py ```python import os import argparse from html import escape from pathlib import Path import subprocess import sys import tempfile from bs4 import BeautifulSoup, NavigableString, Tag from natsort import natsorted import regex BLOCK_LEVEL_ELEMENTS = ["p", "figure", "ol", "ul", "section", "pre", "blockquote", "table"] RST_ROLES = """.. role:: html(code) :language: html .. role:: css(code) :language: css .. role:: bash(code) :language: bash .. role:: path(code) .. role:: italics(emphasis) :class: i .. role:: ws(code) :class: ws .. role:: utf(code) :class: utf .. role:: value(code) :class: value .. role:: string(code) :class: string """ RST_ROLES_LINE_COUNT = len(RST_ROLES.splitlines()) def process_ids(section: Tag, current_id: str, current_number: int) -> int: """ Generate potentially nested numeric IDs for a manual directive. """ for elem in section.children: if isinstance(elem, Tag): # By convention, if there's a "no-numbering" class then skip the section if elem.has_attr("class") and elem["class"] != "no-numbering": continue if elem.name == "section" or (elem.name == "li" and elem.parent.name == "ol"): new_id = f"{current_id}.{current_number}" elem["id"] = new_id process_ids(elem, new_id, 1) current_number = current_number + 1 # Descend into <ol>s, and save the current number we get back so that we don't reset it if we # next move on to further sibling sections if elem.name == "ol": current_number = process_ids(elem, current_id, current_number) return current_number class TocItem: """ An item in the manual table of contents. """ def __init__(self, number: str, title: str, filename: str): self.number = number self.title = title self.filename = filename self.items = [] def make_one_page(dest_directory, header_html: str, footer_html: str): """ Generate one-page php file of the manual. """ # Get all php files in destination directory. 
php_files = list(filter(lambda x: x.endswith("php"), os.listdir(dest_directory))) index = php_files.pop(php_files.index("index.php")) dest_directory = Path(dest_directory) with open(dest_directory / index, "r", encoding="utf-8") as file: index_soup = BeautifulSoup(file, features="html.parser") # The frontmatter contains all needed tags and texts at the beginning of the one-page manual index_soup.find("section")["id"] = "0" frontmatter = index_soup.find("section") # Remove short form ToC frontmatter.find_all("section")[-1].decompose() # Get ToC with open(dest_directory / php_files[0], "r", encoding="utf-8") as file: toc = str(BeautifulSoup(file, features="html.parser").find("nav")) # Format hrefs in ToC for onepager toc = regex.sub(r"(?<=href\=\")([/a-z0-9\.-]+?)(?=#)", "", toc) toc = regex.sub(r"(?<=href\=\")(?P<href>[/0-9a-z-\.]+(?<=/)(?P<chapter>\d+)(?=-)[/0-9a-z-\.]+(?=\"))", r"#\g<chapter>", toc) toc = regex.sub(r"(?<=href=\")(/manual.+?\d)(?=\")", r"#0", toc) # Get chapter tags/text without toc bodymatter = [] php_files = natsorted(list(filter(lambda x: regex.match(r"^\d.+", x), php_files))) for file in php_files: with open(dest_directory / file, "r", encoding="utf-8") as file: section_text = file.read() # Remove everything before and after top level section section_text = section_text[section_text.find("<section"):] section_text = section_text[:section_text.rfind("</section>")] section_text += "</section>" bodymatter.append(section_text) # Writing the one page manual php file (overwrites if exist) with open(dest_directory / "single-page.php", "w+", encoding="utf-8") as file: # Rewind file to erase old file if exist file.seek(0) # Write ToC header_html = regex.sub(r"<main(.+?)>", fr"<main\1>{toc}", header_html) header_html = regex.sub(r"MANUAL_TITLE", "The Standard Ebooks Manual of Style", header_html) header_html = regex.sub(r"<article>", "", header_html) file.write(header_html) # Write frontmatter file.write("<article>\n") file.write(str(frontmatter) + "\n") file.write("</article>\n") # Write bodymatter for chapter in bodymatter: file.write("<article>\n") file.write(str(chapter) + "\n") file.write("</article>\n") # Close with tags and final php tag footer_html = regex.sub(r"</article>", "", footer_html) file.write(footer_html) file.truncate() def main() -> int: """ Entry point for the executable. 
""" parser = argparse.ArgumentParser(description="Build the Standard Ebooks Manual of Style from a set of .rst files.") parser.add_argument("source_directory", metavar="SOURCE_DIRECTORY", help="a directory containing .rst files comprising the Standard Ebooks Manual of Style") parser.add_argument("dest_directory", metavar="DEST_DIRECTORY", help="a directory to place the output .php files") args = parser.parse_args() return_code = 0 if not os.path.isdir(args.source_directory): print(f"Not a directory: `{args.source_directory}`") return 1 if not os.path.isdir(args.dest_directory): print(f"Not a directory: `{args.dest_directory}`") return 1 toc = [] header_path = Path(args.source_directory) / "templates" / "header.html" footer_path = Path(args.source_directory) / "templates" / "footer.html" try: with open(header_path, "r", encoding="utf-8") as file: header_html = file.read() except: print(f"Couldn’t open `{header_path}`") return 1 try: with open(footer_path, "r", encoding="utf-8") as file: footer_html = file.read() except: print(f"Couldn’t open `{footer_path}`") return 1 with tempfile.TemporaryDirectory() as work_directory: for filename in os.listdir(args.source_directory): if not filename.endswith(".rst"): continue with open(Path(args.source_directory) / filename, "r", encoding="utf-8") as file: rst = file.read() # Add our special RST roles to the top of the file before processing rst = RST_ROLES + rst result = subprocess.run(["rst2html5"], input=rst.encode(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) errors = result.stderr.decode().strip() if errors: print(filename) # Because we add the RST roles to the top of the file, we have to subtract those lines to get the # *real* line number in the RST file that the error occurs in. errors = regex.sub("<stdin>:([0-9]+)", lambda exp: "\tLine {}".format(int(exp.groups()[0]) - RST_ROLES_LINE_COUNT), errors).rstrip() print(errors) return_code = 1 html = result.stdout.decode().strip() matches = regex.findall(r"<h1>(.+?)</h1>", html) if matches: title = matches[0] # Remove empty spans html = regex.sub(r"<span>[^>]*?</span>", "", html, flags=regex.DOTALL) # SE extension: :italics:`abc <def>` will generate a link like so: <i><a href="def">abc</a></i> html = regex.sub(r"<em class=\"i\">([^>]+?) &lt;([^<]+?)&gt;</em>", r"""<i><a href="\2">\1</a></i>""", html) # SE extension: change <em class="i"> to <i> html = regex.sub(r"<em class=\"i\">([^<]+?)</em>", r"<i>\1</i>", html) # Change :ws: and :utf: markers to <span>s html = regex.sub(r":(ws|utf):`([^`]+?)`", r"""<span class="\1">\2</span>""", html) # Remove comments html = regex.sub(r"<!--.+?-->", "", html) # Pygments doesn't add colors to html that is just a namespaced attribute, like :html:`xml:lang`. Add that here. html = regex.sub(r"""<code class="html">([a-zA-Z\-:]+?)</code>""", r"""<code class="html"><span class="na">\1</span></code>""", html) root_number = None matches = regex.findall(r"^([0-9]+)\-", filename) if matches: root_number = matches[0] # Now we have some cleaned up HTML. # Start parsing the various <section> and <ol> elements to number them. 
soup = BeautifulSoup(html, "html.parser") if root_number: # Set the ID on the top-level manual section top_level_section = soup.select("body > section")[0] top_level_section["id"] = root_number # Do the actual numbering process_ids(top_level_section, root_number, 1) # Record the number and its h2 children in the ToC toc_item = TocItem(root_number, title, filename.replace(".rst", "")) for header in soup.select("h2"): toc_item.items.append(TocItem(header.parent["id"], header.text, None)) toc.append(toc_item) # rst2html5 doesn't wrap the first child of <li> elements in <p>. # Try to do that here. for li_item in soup.select("li"): need_wrapping = [] for elem in li_item.contents: if isinstance(elem, NavigableString) or elem.name not in BLOCK_LEVEL_ELEMENTS: need_wrapping.append(elem) if elem.name in BLOCK_LEVEL_ELEMENTS: break if need_wrapping: new_tag = soup.new_tag("p") for elem in need_wrapping: new_tag.append(elem) li_item.insert(0, new_tag) # Now that we've got our structure done, insert <aside>s that have the section numbers in them. for elem in soup.find_all(attrs={"id": regex.compile(r"^[0-9\.]+$")}): aside = soup.new_tag("aside") aside["class"] = "number" # Add a link to the section within the section <aside>, but only if it is not the main section number (like "2" or "8") if regex.match(r"^[0-9]$", elem["id"]): aside.string = elem["id"] else: link = soup.new_tag("a") link["href"] = f"#{elem['id']}" link.string = elem["id"] aside.insert(0, link) elem.insert(0, aside) html = str(soup) # Now that we've added IDs and <aside>s, remove the now-unnecessary "no-numbering" class html = html.replace(" class=\"no-numbering\"", "") # Add a <b> around the first word in a bash command, to highlight it. html = regex.sub(r"<code class=\"bash\">([a-z]+) ", r"""<code class="bash"><b>\1</b> """, html) # Add syntax highlighting around value strings html = regex.sub(r"<code class=\"value\">([^<]+?)</code>", r"""<code class="bash"><span class="s">\1</span></code>""", html) # Remove everything up to and including the body element so that we can add our own headers and footers html = regex.sub(r".+?<body>", "", html, flags=regex.DOTALL) html = regex.sub(r"</body>.*", "", html, flags=regex.DOTALL) # If we use CSS properties like -epub-hyphens, the colorizer considers them errors and adds error coloring. Remove that here. html = regex.sub(r"""<span class="err">-</span><span class="n">(.+?)</span>""", r"""<span class="k">-\1</span>""", html) # Convert spaces to tabs html = regex.sub(r" ", "\t", html) # Add PHP headers and footers html = header_html + html + footer_html # Replace <pre> with <figure>. # Do this last, because editing with BS4 and pretty printing can muck up # spacing in <pre> elements if the elements are removed early html = regex.sub(r"<pre data-language=\"([^\"]+?)\">", r"""<figure class="\1 full"><code class="\1 full">""", html) html = regex.sub(r"<pre class=\"([^\"]+?)\" data-language=\"([^\"]+?)\">", r"""<figure class="\1 \2 full"><code class="\2 full">""", html) html = regex.sub(r"<pre data-language=\"([^\"]+?)\" class=\"([^\"]+?)\">", r"""<figure class="\1 \2 full"><code class="\1 full">""", html) html = regex.sub(r"</pre>", r"</code></figure>", html) # Fill in <title> elements if filename == "index.rst": version = regex.findall(r"\.\. version: (.+)", rst)[0] html = regex.sub(r"MANUAL_TITLE", "The Standard Ebooks Manual of Style", html) html = regex.sub(r"<section id=\".+?\"", r"<section", html) else: html = regex.sub(r"MANUAL_TITLE", f"{root_number}. 
{title} - The Standard Ebooks Manual of Style", html) # Replace instances of PD_YEAR with PHP echo code html = regex.sub(r"PD_YEAR", "<?= PD_YEAR ?>", html) with open(Path(work_directory) / filename.replace(".rst", ".php"), "w", encoding="utf-8") as file: file.write(html) file.truncate() # Now, generate the ToC toc = natsorted(toc, key=lambda x: x.number) toc_html = f"<nav><p><a href=\"/manual/{version}\">The Standard Ebooks Manual of Style</a></p><ol>" for toc_item in toc: toc_html += f"<li><p><a href=\"/manual/{version}/{toc_item.filename}\">{toc_item.number}. {escape(toc_item.title)}</a></p><ol>" for sub_item in toc_item.items: toc_html += f"<li><p><a href=\"/manual/{version}/{toc_item.filename}#{sub_item.number}\">{sub_item.number} {escape(sub_item.title)}</a></p></li>" toc_html += "</ol></li>" toc_html += "</ol></nav>" # Place the ToC and version number into the final files for filename in os.listdir(work_directory): if not filename.endswith(".php"): continue with open(Path(work_directory) / filename, "r", encoding="utf-8") as file: html = file.read() html = html.replace("VERSION", version) if filename != "index.php": html = regex.sub(r"<article(.*?)>", fr"\n{toc_html}\n<article\1>", html) # Check if pygments generated any errors (for example, missing quotes in an HTML attribute) if "class=\"err\"" in html: print(f"Error colorized code in `{filename}`. Search the file for `class=\"err\"`.") with open(Path(args.dest_directory) / filename, "w", encoding="utf-8") as file: file.write(html) file.truncate() make_one_page(args.dest_directory, header_html, footer_html) return return_code if __name__ == "__main__": sys.exit(main()) ```
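The numbering logic in process_ids is easiest to see on a toy fragment. A rough illustration, assuming the function above is importable or pasted into a session (the HTML snippet and root number are made up):

```python
# Hypothetical demonstration of process_ids() from the script above.
from bs4 import BeautifulSoup

html = "<section><section><h2>Scope</h2></section><ol><li>First rule</li><li>Second rule</li></ol></section>"
soup = BeautifulSoup(html, "html.parser")
top = soup.find("section")
top["id"] = "8"
process_ids(top, "8", 1)
# The inner <section> gets id="8.1"; the list items get id="8.2" and id="8.3".
print(soup)
```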
{ "source": "Jens22/imitation", "score": 2 }
#### File: imitation/util/util.py ```python import datetime import functools import itertools import os import uuid from typing import ( Callable, Iterable, Iterator, Optional, Sequence, Type, TypeVar, Union, ) import gym import gym_gazebo2 #jw add lib gym_gazebo2 import numpy as np import stable_baselines3 from gym.wrappers import TimeLimit from stable_baselines3.common import monitor from stable_baselines3.common.base_class import BaseAlgorithm from stable_baselines3.common.policies import ActorCriticPolicy, BasePolicy from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv from imitation.data import wrappers def make_unique_timestamp() -> str: """Timestamp, with random uuid added to avoid collisions.""" ISO_TIMESTAMP = "%Y%m%d_%H%M%S" timestamp = datetime.datetime.now().strftime(ISO_TIMESTAMP) random_uuid = uuid.uuid4().hex[:6] return f"{timestamp}_{random_uuid}" def make_vec_env( env_name: str, n_envs: int = 8, seed: int = 0, parallel: bool = False, log_dir: Optional[str] = None, max_episode_steps: Optional[int] = None, post_wrappers: Optional[Sequence[Callable[[gym.Env, int], gym.Env]]] = None, ) -> VecEnv: """Returns a VecEnv initialized with `n_envs` Envs. Args: env_name: The Env's string id in Gym. n_envs: The number of duplicate environments. seed: The environment seed. parallel: If True, uses SubprocVecEnv; otherwise, DummyVecEnv. log_dir: If specified, saves Monitor output to this directory. max_episode_steps: If specified, wraps each env in a TimeLimit wrapper with this episode length. If not specified and `max_episode_steps` exists for this `env_name` in the Gym registry, uses the registry `max_episode_steps` for every TimeLimit wrapper (this automatic wrapper is the default behavior when calling `gym.make`). Otherwise the environments are passed into the VecEnv unwrapped. post_wrappers: If specified, iteratively wraps each environment with each of the wrappers specified in the sequence. The argument should be a Callable accepting two arguments, the Env to be wrapped and the environment index, and returning the wrapped Env. """ # Resolve the spec outside of the subprocess first, so that it is available to # subprocesses running `make_env` via automatic pickling. spec = gym.spec(env_name) def make_env(i, this_seed): # Previously, we directly called `gym.make(env_name)`, but running # `imitation.scripts.train_adversarial` within `imitation.scripts.parallel` # created a weird interaction between Gym and Ray -- `gym.make` would fail # inside this function for any of our custom environment unless those # environments were also `gym.register()`ed inside `make_env`. Even # registering the custom environment in the scope of `make_vec_env` didn't # work. For more discussion and hypotheses on this issue see PR #160: # https://github.com/HumanCompatibleAI/imitation/pull/160. env = spec.make() # Seed each environment with a different, non-sequential seed for diversity # (even if caller is passing us sequentially-assigned base seeds). int() is # necessary to work around gym bug where it chokes on numpy int64s. 
env.seed(int(this_seed)) if max_episode_steps is not None: env = TimeLimit(env, max_episode_steps) elif spec.max_episode_steps is not None: env = TimeLimit(env, max_episode_steps=spec.max_episode_steps) # Use Monitor to record statistics needed for Baselines algorithms logging # Optionally, save to disk log_path = None if log_dir is not None: log_subdir = os.path.join(log_dir, "monitor") os.makedirs(log_subdir, exist_ok=True) log_path = os.path.join(log_subdir, f"mon{i:03d}") env = monitor.Monitor(env, log_path) env = wrappers.RolloutInfoWrapper(env) if post_wrappers: for wrapper in post_wrappers: env = wrapper(env, i) return env rng = np.random.RandomState(seed) env_seeds = rng.randint(0, (1 << 31) - 1, (n_envs,)) env_fns = [functools.partial(make_env, i, s) for i, s in enumerate(env_seeds)] if parallel: # See GH hill-a/stable-baselines issue #217 return SubprocVecEnv(env_fns, start_method="forkserver") else: return DummyVecEnv(env_fns) def init_rl( env: Union[gym.Env, VecEnv], model_class: Type[BaseAlgorithm] = stable_baselines3.PPO, policy_class: Type[BasePolicy] = ActorCriticPolicy, **model_kwargs, ): """Instantiates a policy for the provided environment. Args: env: The (vector) environment. model_class: A Stable Baselines RL algorithm. policy_class: A Stable Baselines compatible policy network class. model_kwargs (dict): kwargs passed through to the algorithm. Note: anything specified in `policy_kwargs` is passed through by the algorithm to the policy network. Returns: An RL algorithm. """ # FIXME(sam): verbose=1 and tensorboard_log=None is a hack to prevent SB3 # from reconfiguring the logger after we've already configured it. Should # remove once SB3 issue #109 is fixed (there are also >=2 other comments to # this effect elsewhere; worth grepping for "#109"). all_kwargs = { "verbose": 1, "tensorboard_log": None, } all_kwargs.update(model_kwargs) return model_class( policy_class, env, **all_kwargs ) # pytype: disable=not-instantiable def docstring_parameter(*args, **kwargs): """Treats the docstring as a format string, substituting in the arguments.""" def helper(obj): obj.__doc__ = obj.__doc__.format(*args, **kwargs) return obj return helper T = TypeVar("T") def endless_iter(iterable: Iterable[T]) -> Iterator[T]: """Generator that endlessly yields elements from iterable. If any call to `iter(iterable)` has no elements, then this function raises ValueError. >>> x = range(2) >>> it = endless_iter(x) >>> next(it) 0 >>> next(it) 1 >>> next(it) 0 """ try: next(iter(iterable)) except StopIteration: err = ValueError(f"iterable {iterable} had no elements to iterate over.") raise err return itertools.chain.from_iterable(itertools.repeat(iterable)) ```
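Putting the two helpers together, a minimal training sketch could look like the following; the environment id and hyperparameters are arbitrary, and it assumes the module's extra gym_gazebo2 import resolves on the machine.

```python
# Hypothetical usage of make_vec_env and init_rl from the module above.
import stable_baselines3
from imitation.util.util import make_vec_env, init_rl

venv = make_vec_env("CartPole-v1", n_envs=2, seed=0, parallel=False)
algo = init_rl(venv, model_class=stable_baselines3.PPO, batch_size=64)
algo.learn(total_timesteps=2_000)
venv.close()
```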
{ "source": "jens321/allennlp-models", "score": 3 }
#### File: lm/language_model_heads/linear.py ```python from overrides import overrides import torch from allennlp.data import Vocabulary from allennlp_models.lm.language_model_heads.language_model_head import LanguageModelHead @LanguageModelHead.register("linear") class LinearLanguageModelHead(LanguageModelHead): """ Uses `torch.nn.Linear` as a language model head. Does nothing else fancy. This was intended largely for testing code with small models and simple components. It's likely that you would want something nicer for actually training a language model, such as tying weights with an input embedding, or an adaptive softmax, or something. """ def __init__(self, vocab: Vocabulary, input_dim: int, vocab_namespace: str) -> None: super().__init__() self.input_dim = input_dim self.output_dim = vocab.get_vocab_size(vocab_namespace) if self.output_dim <= 0: raise ValueError("We can't embed into an empty vocabulary.") self.linear = torch.nn.Linear(self.input_dim, self.output_dim) @overrides def get_input_dim(self) -> int: return self.input_dim @overrides def get_output_dim(self) -> int: return self.output_dim def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.linear(hidden_states) ``` #### File: rc/bidaf/bidaf_predictor.py ```python from copy import deepcopy from typing import Dict, List from overrides import overrides import numpy from allennlp.common.util import JsonDict from allennlp.data import Instance from allennlp.predictors.predictor import Predictor from allennlp.data.fields import ( IndexField, ListField, LabelField, SpanField, SequenceLabelField, SequenceField, ) @Predictor.register("reading-comprehension") class ReadingComprehensionPredictor(Predictor): """ Predictor for the :class:`~allennlp_rc.models.bidaf.BidirectionalAttentionFlow` model, and any other model that takes a question and passage as input. """ def predict(self, question: str, passage: str) -> JsonDict: """ Make a machine comprehension prediction on the supplied input. See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task. Parameters ---------- question : ``str`` A question about the content in the supplied paragraph. The question must be answerable by a span in the paragraph. passage : ``str`` A paragraph of information relevant to the question. Returns ------- A dictionary that represents the prediction made by the system. The answer string will be under the "best_span_str" key. """ return self.predict_json({"passage": passage, "question": question}) @overrides def _json_to_instance(self, json_dict: JsonDict) -> Instance: """ Expects JSON that looks like ``{"question": "...", "passage": "..."}``. """ question_text = json_dict["question"] passage_text = json_dict["passage"] return self._dataset_reader.text_to_instance(question_text, passage_text) @overrides def predictions_to_labeled_instances( self, instance: Instance, outputs: Dict[str, numpy.ndarray] ) -> List[Instance]: new_instance = deepcopy(instance) # For BiDAF if "best_span" in outputs: span_start_label = outputs["best_span"][0] span_end_label = outputs["best_span"][1] passage_field: SequenceField = new_instance["passage"] # type: ignore new_instance.add_field("span_start", IndexField(int(span_start_label), passage_field)) new_instance.add_field("span_end", IndexField(int(span_end_label), passage_field)) # For NAQANet model. It has the fields: answer_as_passage_spans, answer_as_question_spans, # answer_as_add_sub_expressions, answer_as_counts. We need labels for all. 
elif "answer" in outputs: answer_type = outputs["answer"]["answer_type"] # When the problem is a counting problem if answer_type == "count": field = ListField([LabelField(int(outputs["answer"]["count"]), skip_indexing=True)]) new_instance.add_field("answer_as_counts", field) # When the answer is in the passage elif answer_type == "passage_span": # TODO(mattg): Currently we only handle one predicted span. span = outputs["answer"]["spans"][0] # Convert character span indices into word span indices word_span_start = None word_span_end = None offsets = new_instance["metadata"].metadata["passage_token_offsets"] # type: ignore for index, offset in enumerate(offsets): if offset[0] == span[0]: word_span_start = index if offset[1] == span[1]: word_span_end = index passage_field: SequenceField = new_instance["passage"] # type: ignore field = ListField([SpanField(word_span_start, word_span_end, passage_field)]) new_instance.add_field("answer_as_passage_spans", field) # When the answer is an arithmetic calculation elif answer_type == "arithmetic": # The different numbers in the passage that the model encounters sequence_labels = outputs["answer"]["numbers"] numbers_field: ListField = instance["number_indices"] # type: ignore # The numbers in the passage are given signs, that's what we are labeling here. # Negative signs are given the class label 2 (for 0 and 1, the sign matches the # label). labels = [] for label in sequence_labels: if label["sign"] == -1: labels.append(2) else: labels.append(label["sign"]) # There's a dummy number added in the dataset reader to handle passages with no # numbers; it has a label of 0 (not included). labels.append(0) field = ListField([SequenceLabelField(labels, numbers_field)]) new_instance.add_field("answer_as_add_sub_expressions", field) # When the answer is in the question elif answer_type == "question_span": span = outputs["answer"]["spans"][0] # Convert character span indices into word span indices word_span_start = None word_span_end = None question_offsets = new_instance["metadata"].metadata[ # type: ignore "question_token_offsets" ] for index, offset in enumerate(question_offsets): if offset[0] == span[0]: word_span_start = index if offset[1] == span[1]: word_span_end = index question_field: SequenceField = new_instance["question"] # type: ignore field = ListField([SpanField(word_span_start, word_span_end, question_field)]) new_instance.add_field("answer_as_question_spans", field) return [new_instance] ``` #### File: rc/transformer_qa/transformer_qa_model.py ```python import logging from typing import Any, Dict, List, Optional import numpy as np import torch from allennlp.common.util import sanitize_wordpiece from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder from allennlp.modules.token_embedders import PretrainedTransformerEmbedder from allennlp.nn.util import get_token_ids_from_text_field_tensors from torch import nn from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.nn import util from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy from torch.nn.functional import cross_entropy from ..common import get_best_span from ..common import SquadEmAndF1 logger = logging.getLogger(__name__) @Model.register("transformer_qa") class TransformerQA(Model): """ This class implements a reading comprehension model patterned after the proposed model in https://arxiv.org/abs/1810.04805 (Devlin et al), with improvements borrowed from the SQuAD model in the transformers project. 
It predicts start tokens and end tokens with a linear layer on top of word piece embeddings. Note that the metrics that the model produces are calculated on a per-instance basis only. Since there could be more than one instance per question, these metrics are not the official numbers on the SQuAD task. To get official numbers, run the script in scripts/transformer_qa_eval.py. Parameters ---------- vocab : ``Vocabulary`` transformer_model_name : ``str``, optional (default=``bert-base-cased``) This model chooses the embedder according to this setting. You probably want to make sure this is set to the same thing as the reader. """ def __init__( self, vocab: Vocabulary, transformer_model_name: str = "bert-base-cased", hidden_size=768, **kwargs ) -> None: super().__init__(vocab, **kwargs) self._text_field_embedder = BasicTextFieldEmbedder( {"tokens": PretrainedTransformerEmbedder(transformer_model_name, hidden_size=hidden_size, task="QA")} ) self._linear_layer = nn.Linear(self._text_field_embedder.get_output_dim(), 2) self._span_start_accuracy = CategoricalAccuracy() self._span_end_accuracy = CategoricalAccuracy() self._span_accuracy = BooleanAccuracy() self._per_instance_metrics = SquadEmAndF1() def forward( # type: ignore self, question_with_context: Dict[str, Dict[str, torch.LongTensor]], context_span: torch.IntTensor, answer_span: Optional[torch.IntTensor] = None, metadata: List[Dict[str, Any]] = None, ) -> Dict[str, torch.Tensor]: """ Parameters ---------- question_with_context : Dict[str, torch.LongTensor] From a ``TextField``. The model assumes that this text field contains the context followed by the question. It further assumes that the tokens have type ids set such that any token that can be part of the answer (i.e., tokens from the context) has type id 0, and any other token (including [CLS] and [SEP]) has type id 1. context_span : ``torch.IntTensor`` From a ``SpanField``. This marks the span of word pieces in ``question`` from which answers can come. answer_span : ``torch.IntTensor``, optional From a ``SpanField``. This is the thing we are trying to predict - the span of text that marks the answer. If given, we compute a loss that gets included in the output directory. metadata : ``List[Dict[str, Any]]``, optional If present, this should contain the question id, and the original texts of context, question, tokenized version of both, and a list of possible answers. The length of the ``metadata`` list should be the batch size, and each dictionary should have the keys ``id``, ``question``, ``context``, ``question_tokens``, ``context_tokens``, and ``answers``. Returns ------- An output dictionary consisting of: span_start_logits : torch.FloatTensor A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log probabilities of the span start position. span_start_probs : torch.FloatTensor The result of ``softmax(span_start_logits)``. span_end_logits : torch.FloatTensor A tensor of shape ``(batch_size, passage_length)`` representing unnormalized log probabilities of the span end position (inclusive). span_end_probs : torch.FloatTensor The result of ``softmax(span_end_logits)``. best_span : torch.IntTensor The result of a constrained inference over ``span_start_logits`` and ``span_end_logits`` to find the most probable span. Shape is ``(batch_size, 2)`` and each offset is a token index. best_span_scores : torch.FloatTensor The score for each of the best spans. loss : torch.FloatTensor, optional A scalar loss to be optimised. 
best_span_str : List[str] If sufficient metadata was provided for the instances in the batch, we also return the string from the original passage that the model thinks is the best answer to the question. """ embedded_question = self._text_field_embedder(question_with_context) logits = self._linear_layer(embedded_question) span_start_logits, span_end_logits = logits.split(1, dim=-1) span_start_logits = span_start_logits.squeeze(-1) span_end_logits = span_end_logits.squeeze(-1) possible_answer_mask = torch.zeros_like( get_token_ids_from_text_field_tensors(question_with_context), dtype=torch.bool ) for i, (start, end) in enumerate(context_span): possible_answer_mask[i, start : end + 1] = True span_start_logits = util.replace_masked_values( span_start_logits, possible_answer_mask, -1e32 ) span_end_logits = util.replace_masked_values(span_end_logits, possible_answer_mask, -1e32) span_start_probs = torch.nn.functional.softmax(span_start_logits, dim=-1) span_end_probs = torch.nn.functional.softmax(span_end_logits, dim=-1) best_spans = get_best_span(span_start_logits, span_end_logits) best_span_scores = torch.gather( span_start_logits, 1, best_spans[:, 0].unsqueeze(1) ) + torch.gather(span_end_logits, 1, best_spans[:, 1].unsqueeze(1)) best_span_scores = best_span_scores.squeeze(1) output_dict = { "span_start_logits": span_start_logits, "span_start_probs": span_start_probs, "span_end_logits": span_end_logits, "span_end_probs": span_end_probs, "best_span": best_spans, "best_span_scores": best_span_scores, } # Compute the loss for training. if answer_span is not None: span_start = answer_span[:, 0] span_end = answer_span[:, 1] span_mask = span_start != -1 self._span_accuracy( best_spans, answer_span, span_mask.unsqueeze(-1).expand_as(best_spans) ) start_loss = cross_entropy(span_start_logits, span_start, ignore_index=-1) if torch.any(start_loss > 1e9): logger.critical("Start loss too high (%r)", start_loss) logger.critical("span_start_logits: %r", span_start_logits) logger.critical("span_start: %r", span_start) assert False end_loss = cross_entropy(span_end_logits, span_end, ignore_index=-1) if torch.any(end_loss > 1e9): logger.critical("End loss too high (%r)", end_loss) logger.critical("span_end_logits: %r", span_end_logits) logger.critical("span_end: %r", span_end) assert False loss = (start_loss + end_loss) / 2 self._span_start_accuracy(span_start_logits, span_start, span_mask) self._span_end_accuracy(span_end_logits, span_end, span_mask) output_dict["loss"] = loss # Compute the EM and F1 on SQuAD and add the tokenized input to the output. if metadata is not None: best_spans = best_spans.detach().cpu().numpy() output_dict["best_span_str"] = [] context_tokens = [] for metadata_entry, best_span in zip(metadata, best_spans): context_tokens_for_question = metadata_entry["context_tokens"] context_tokens.append(context_tokens_for_question) best_span -= 1 + len(metadata_entry["question_tokens"]) + 2 assert np.all(best_span >= 0) predicted_start, predicted_end = tuple(best_span) while ( predicted_start >= 0 and context_tokens_for_question[predicted_start].idx is None ): predicted_start -= 1 if predicted_start < 0: logger.warning( f"Could not map the token '{context_tokens_for_question[best_span[0]].text}' at index " f"'{best_span[0]}' to an offset in the original text." 
) character_start = 0 else: character_start = context_tokens_for_question[predicted_start].idx while ( predicted_end < len(context_tokens_for_question) and context_tokens_for_question[predicted_end].idx is None ): predicted_end += 1 if predicted_end >= len(context_tokens_for_question): logger.warning( f"Could not map the token '{context_tokens_for_question[best_span[1]].text}' at index " f"'{best_span[1]}' to an offset in the original text." ) character_end = len(metadata_entry["context"]) else: end_token = context_tokens_for_question[predicted_end] character_end = end_token.idx + len(sanitize_wordpiece(end_token.text)) best_span_string = metadata_entry["context"][character_start:character_end] output_dict["best_span_str"].append(best_span_string) answers = metadata_entry.get("answers") if len(answers) > 0: self._per_instance_metrics(best_span_string, answers) output_dict["context_tokens"] = context_tokens return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: exact_match, f1_score = self._per_instance_metrics.get_metric(reset) return { "start_acc": self._span_start_accuracy.get_metric(reset), "end_acc": self._span_end_accuracy.get_metric(reset), "span_acc": self._span_accuracy.get_metric(reset), "per_instance_em": exact_match, "per_instance_f1": f1_score, } ``` #### File: rc/transformer_qa/transformer_qa_predictor.py ```python from typing import List, Dict, Any from allennlp.models import Model from overrides import overrides from allennlp.common.util import JsonDict, sanitize from allennlp.data import Instance, DatasetReader from allennlp.predictors.predictor import Predictor @Predictor.register("transformer_qa") class TransformerQAPredictor(Predictor): """ Predictor for the :class:`~allennlp_rc.models.TransformerQA` model, and any other model that takes a question and passage as input. """ def __init__(self, model: Model, dataset_reader: DatasetReader) -> None: super(TransformerQAPredictor, self).__init__(model, dataset_reader) self._next_qid = 1 def predict(self, question: str, passage: str) -> JsonDict: """ Make a machine comprehension prediction on the supplied input. See https://rajpurkar.github.io/SQuAD-explorer/ for more information about the machine comprehension task. Parameters ---------- question : ``str`` A question about the content in the supplied paragraph. The question must be answerable by a span in the paragraph. passage : ``str`` A paragraph of information relevant to the question. Returns ------- A dictionary that represents the prediction made by the system. The answer string will be under the "best_span_str" key. """ return self.predict_json({"context": passage, "question": question}) def predict_json(self, inputs: JsonDict) -> JsonDict: results = self.predict_batch_json([inputs]) assert len(results) == 1 return results[0] @overrides def _json_to_instance(self, json_dict: JsonDict) -> Instance: raise NotImplementedError( "This predictor maps a question to multiple instances. " "Please use _json_to_instances instead." 
) def _json_to_instances(self, json_dict: JsonDict) -> List[Instance]: result = list( self._dataset_reader.make_instances( qid=str(self._next_qid), question=json_dict["question"], answers=[], context=json_dict["context"], first_answer_offset=None, ) ) self._next_qid += 1 return result @overrides def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]: instances = [] for json_dict in json_dicts: instances.extend(self._json_to_instances(json_dict)) return instances @overrides def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]: instances = self._batch_json_to_instances(inputs) result = self.predict_batch_instance(instances) assert len(result) == len(inputs) return result @overrides def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]: outputs = self._model.forward_on_instances(instances) # group outputs with the same question id qid_to_output: Dict[str, Dict[str, Any]] = {} for instance, output in zip(instances, outputs): qid = instance["metadata"]["id"] output["id"] = qid output["answers"] = instance["metadata"]["answers"] if qid in qid_to_output: old_output = qid_to_output[qid] if old_output["best_span_scores"] < output["best_span_scores"]: qid_to_output[qid] = output else: qid_to_output[qid] = output return [sanitize(o) for o in qid_to_output.values()] ``` #### File: tests/coref/conll_coref_scores_test.py ```python import torch from allennlp.common.testing import AllenNlpTestCase, multi_device from allennlp_models.coref.conll_coref_scores import ConllCorefScores class ConllCorefScoresTest(AllenNlpTestCase): @multi_device def test_get_predicted_clusters(self, device: str): top_spans = torch.tensor([[0, 1], [4, 6], [8, 9]], device=device) antecedent_indices = torch.tensor([[-1, -1, -1], [0, -1, -1], [0, 1, -1]], device=device) predicted_antecedents = torch.tensor([-1, -1, 1], device=device) clusters, mention_to_cluster = ConllCorefScores.get_predicted_clusters( top_spans, antecedent_indices, predicted_antecedents ) assert len(clusters) == 1 assert set(clusters[0]) == {(4, 6), (8, 9)} assert mention_to_cluster == {(4, 6): clusters[0], (8, 9): clusters[0]} ``` #### File: tests/coref/interpret_test.py ```python from pytest import approx from allennlp.common.testing import AllenNlpTestCase from allennlp.interpret.saliency_interpreters import SimpleGradient from allennlp.models.archival import load_archive from allennlp.predictors import Predictor from tests import FIXTURES_ROOT class TestInterpret(AllenNlpTestCase): def test_simple_gradient_coref(self): inputs = { "document": "This is a single string document about a test. Sometimes it " "contains coreferent parts." 
} archive = load_archive(FIXTURES_ROOT / "coref" / "serialization" / "model.tar.gz") predictor = Predictor.from_archive(archive, "coreference-resolution") interpreter = SimpleGradient(predictor) interpretation = interpreter.saliency_interpret_from_json(inputs) assert interpretation is not None assert "instance_1" in interpretation assert "grad_input_1" in interpretation["instance_1"] grad_input_1 = interpretation["instance_1"]["grad_input_1"] assert len(grad_input_1) == 22 # 22 wordpieces in input # two interpretations should be identical for gradient repeat_interpretation = interpreter.saliency_interpret_from_json(inputs) repeat_grad_input_1 = repeat_interpretation["instance_1"]["grad_input_1"] for grad, repeat_grad in zip(grad_input_1, repeat_grad_input_1): assert grad == approx(repeat_grad) ``` #### File: rc/qanet/qanet_encoder_test.py ```python import torch from allennlp.common.testing import AllenNlpTestCase from allennlp.common.params import Params from allennlp_models.rc.qanet import QaNetEncoder class QaNetEncoderTest(AllenNlpTestCase): def test_qanet_encoder_can_build_from_params(self): params = Params( { "input_dim": 16, "hidden_dim": 16, "attention_projection_dim": 16, "feedforward_hidden_dim": 16, "num_blocks": 2, "num_convs_per_block": 2, "conv_kernel_size": 3, "num_attention_heads": 4, "dropout_prob": 0.1, "layer_dropout_undecayed_prob": 0.1, "attention_dropout_prob": 0, } ) encoder = QaNetEncoder.from_params(params) assert isinstance(encoder, QaNetEncoder) assert encoder.get_input_dim() == 16 assert encoder.get_output_dim() == 16 def test_qanet_encoder_runs_forward(self): encoder = QaNetEncoder( input_dim=16, hidden_dim=16, attention_projection_dim=16, feedforward_hidden_dim=16, num_blocks=2, num_convs_per_block=2, conv_kernel_size=3, num_attention_heads=4, dropout_prob=0.1, layer_dropout_undecayed_prob=0.1, attention_dropout_prob=0.1, ) inputs = torch.randn(2, 12, 16) assert list(encoder(inputs).size()) == [2, 12, 16] ```
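A short, hedged sketch of how the `transformer_qa` predictor registered above is typically queried; `model.tar.gz` is a placeholder for a trained TransformerQA archive.

```python
# Illustrative only: load a trained archive and ask the predictor defined above
# a question. The archive path is an assumption, not part of this repository.
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor

archive = load_archive("model.tar.gz")
predictor = Predictor.from_archive(archive, "transformer_qa")
result = predictor.predict(
    question="Who sent the meeting notes?",
    passage="Katie Jordan sent the meeting notes to Alex on Sunday evening.",
)
print(result["best_span_str"])
```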
{ "source": "jens321/CBRAIN-CAM", "score": 2 }
#### File: MAPS/vae/sample_cifar_10.py ```python import argparse import json import numpy as np import matplotlib.pyplot as plt import keras from keras import layers from keras import backend as K from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.manifold import TSNE from tensorflow.keras.datasets.cifar10 import load_data from train_cifar_10 import encoder_gen, decoder_gen def sample_reconstructions(vae, train_data, test_data, config_file: str): """ TODO """ # get random sample original_samples = [] recon_samples = [] min_max = [] input_dim = 32 * 32 * 3 for i in range(5): rand_sample = np.random.randint(0, len(train_data)) sample = train_data[rand_sample] sample_mean_var = vae.predict(np.expand_dims(sample, 0)) sample_mean = sample_mean_var[0, :input_dim] sample_log_var = sample_mean_var[0, input_dim:] recon_sample = np.random.multivariate_normal(sample_mean, 0 * np.identity(input_dim)) sample *= 255 recon_sample *= 255 sample = np.rint(sample).astype(int) recon_sample = np.rint(recon_sample).astype(int) # print("original sample", sample.reshape((input_dim,))) # print("reconstructed sample", recon_sample) # print(np.max(np.abs(sample.reshape((input_dim,)) - recon_sample))) max_reconstructed = np.max(np.abs(recon_sample)) print("max of reconstructed", max_reconstructed) # max_sample = np.max(sample.reshape((input_dim,))) # print("max of original", max_sample) # min_reconstructed = np.min(recon_sample) # print("min of reconstructed", min_reconstructed) # min_sample = np.min(sample.reshape((input_dim,))) # print("min of original", min_sample) recon_sample = recon_sample.reshape((32, 32, 3)) print("*****", sample.shape) original_samples.append(sample) recon_samples.append(recon_sample) # min_max.append((min(min_reconstructed, min_sample), max(max_reconstructed, max_sample))) fig, axs = plt.subplots(5, 2) for i in range(5): # vmin = min_max[i][0] # vmax = min_max[i][1] sub_img = axs[i, 0].imshow(original_samples[i]) fig.colorbar(sub_img, ax=axs[i, 0]) # ax.set_title("Original Sample") sub_img = axs[i, 1].imshow(recon_samples[i]) fig.colorbar(sub_img, ax=axs[i, 1]) # # ax.set_title("Reconstructed Sample") plt.savefig('./model_graphs/reconstructed_train_samples_{}.png'.format(config_file)) def sample_latent_space(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels): """ TODO """ # Predict latent train data _, _, z_train = vae_encoder.predict(train_data) # Train scaler on latent train data sc = StandardScaler() z_train_std = sc.fit_transform(z_train) # Train TSNE on latent train data tsne = TSNE(n_components=2) # z_train_pca = tsne.fit_transform(z_train_std) # Predict latent test data _, _, z_test = vae_encoder.predict(test_data) # Apply scaling and tsne from train to test data z_test_std = sc.transform(z_test) z_test_tsne = tsne.fit_transform(z_test_std) # Make plot of latent test data plt.scatter(x=z_test_std[:, 0], y=z_test_std[:, 1], c=test_labels) plt.colorbar() plt.savefig('./model_graphs/latent_space_{}.png'.format(config_file)) def main(): args = argument_parsing() print("Command line args:", args) f = open("./model_config/{}.json".format(args.config_file)) model_config = json.load(f) f.close() (train_data, _), (test_data, _) = load_data() img_width = train_data.shape[1] img_height = train_data.shape[2] img_depth = train_data.shape[3] print("Image shape:", img_width, img_height, img_depth) train_data = train_data/255 test_data = test_data/255 # Construct VAE Encoder encoder_result = encoder_gen((img_width, 
img_height, img_depth), model_config["encoder"]) # Construct VAE Decoder vae_decoder = decoder_gen( (img_width, img_height, img_depth), model_config["decoder"], encoder_result.shape_before_flattening ) _, _, z = encoder_result.vae_encoder(encoder_result.inputs) x_mu = vae_decoder(z) vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_mu]) # load weights from file vae.load_weights('./models/model_{}.th'.format(args.config_file)) print("weights loaded") # get side by side plots of original vs. reconstructed sample_reconstructions(vae, train_data, test_data, args.config_file) # sample_latent_space(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels) def argument_parsing(): parser = argparse.ArgumentParser() parser.add_argument('--config_file', type=str, help='This option specifies the id of the config file to use to train the VAE.') args = parser.parse_args() return args if __name__ == "__main__": main() ```
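The reconstruction step in `sample_reconstructions` splits the decoder output into a per-pixel mean and log-variance; the stand-in sketch below mimics that split with random data so the shapes can be checked without the trained model.

```python
# Stand-in for vae.predict(...) on one CIFAR-10 image: the output concatenates
# [mean, log_var], and only the first input_dim entries form the reconstruction.
import numpy as np

input_dim = 32 * 32 * 3
sample_mean_var = np.random.rand(1, 2 * input_dim)
sample_mean = sample_mean_var[0, :input_dim]
sample_log_var = sample_mean_var[0, input_dim:]
recon_image = np.rint(sample_mean.reshape(32, 32, 3) * 255).astype(int)
print(recon_image.shape, sample_log_var.shape)  # (32, 32, 3) (3072,)
```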
{ "source": "jensadne/brixdb", "score": 3 }
#### File: brixdb/service/bricksnpieces.py ```python import requests def bricksnpieces_cookies(country='no'): """ Bricks&Pieces requires a couple of cookies to be set to fetch data. State 3 is sale. """ return {'csAgeAndCountry': '{"age":"18","countrycode":"%s"}' % country.upper(), 'csRpFlowState': '{"state":3}', 'country': country.upper(), 'AGE_GATE': 'grown_up'} def get_element_prices(element, desired_quantity=1, country='no'): """ Fetches price information for the given Element from Bricks&Pieces, which has its own set of stupidity. """ # obviously this requires at least one element id to work if not element.lego_ids: return {} # TODO: find a good way of storing the currently active element id in case # it's not the highest number element_id = sorted(element.lego_ids, reverse=True)[0] cookies = bricksnpieces_cookies(country=country) # url = 'https://www.lego.com/nb-no/service/rpservice/getitemordesign?issalesflow=true&itemordesignnumber={item}' args = [element_id, country.upper()] url = 'https://bricksandpieces.services.lego.com/api/v1/bricks/items/{}?country={}&orderType=buy'.format(*args) response = requests.get(url.format(item=element_id), cookies=cookies) if response.status_code != 200: return [] item = response.json()['Bricks'][0] return {'price': item['Price'], 'quantity': item['SQty']} def get_part_prices(design_id): """ Get all prices for a design id. """ item = response.json()['Bricks'] return data ``` #### File: brixdb/brixdb/tasks.py ```python from celery import shared_task @shared_task def bricklink_sync(): """ Downloads and imports all relevant data from Bricklink's catalog """ # step 1: get categories # step 2: get colours # step 3: get sets # step 4: get parts # step 5: get element mappings # step 6 get item inventories ``` #### File: brixdb/brixdb/views.py ```python from django.http import JsonResponse from django.shortcuts import get_object_or_404 from django.utils.translation import ugettext as _ from django.views.generic.detail import DetailView from django.views.decorators.http import require_POST from rest_framework import viewsets from rest_framework.decorators import action from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from . 
import models, serializers from .forms import SimpleIntegerForm from .models import Colour, Element, Set, Part from .service import bricklink, bricksnpieces import q class SetViewSet(viewsets.ReadOnlyModelViewSet): lookup_field = 'number' queryset = Set.objects.all() serializer_class = serializers.SetSerializer class PartViewSet(viewsets.ReadOnlyModelViewSet): lookup_field = 'number' queryset = Part.objects.all().prefetch_related('elements', 'elements__colour') serializer_class = serializers.PartDetailSerializer class ColourViewSet(viewsets.ReadOnlyModelViewSet): lookup_field = 'slug' queryset = Colour.objects.all() def get_serializer_class(self): if self.action == 'retrieve': return serializers.ColourDetailSerializer return serializers.ColourSerializer class ElementViewSet(viewsets.ReadOnlyModelViewSet): lookup_field = 'pk' queryset = Element.objects.all() serializer_class = serializers.ElementSerializer @action(methods=['get'], detail=False, permission_classes=[IsAuthenticated]) def owned(self, request): elements = Element.objects.owned_by(request.user).select_related('part', 'colour') serializer = self.get_serializer(elements, many=True) return Response(serializer.data) # TODO: ensure we can get a pk from frontend and make this a detail action @action(methods=['get'], detail=False, permission_classes=[IsAuthenticated]) def bricklink_prices(self, request): """ Fetch cheapest price from Bricklink for the given Element. """ element = get_object_or_404(self.queryset, lego_ids__contains=q(int(request.query_params.get('element', 0)))) prices = bricklink.get_element_prices(element) # we only care about the cheapest for this check return Response(prices[0]) # TODO: ensure we can get a pk from frontend and make this a detail action @action(methods=['get'], detail=False, permission_classes=[IsAuthenticated]) def bricksnpieces_prices(self, request): """ Fetch B&P price for the given Element """ element = get_object_or_404(self.queryset, lego_ids__contains=q(int(request.query_params.get('element', 0)))) price = bricksnpieces.get_element_prices(element) return Response(price) class BnPElementViewSet(viewsets.ModelViewSet): lookup_field = 'tlg_element_id' queryset = models.BnPElement.objects.all().with_price().available().default_related() serializer_class = serializers.BnPElementSerializer def get_queryset(self): qs = self.queryset colour_slug = self.request.query_params.get('colour', None) category_slug = self.request.query_params.get('category', None) qs = qs.by_colour(colour_slug) if colour_slug else qs qs = qs.by_category(category_slug) if category_slug else qs return qs @require_POST def add_set_owned(request, set_number): _set = get_object_or_404(Set, number=set_number) f = SimpleIntegerForm(request.POST) if not f.is_valid(): return JsonResponse({'result': _('Invalid input data')}, status_code=400) return JsonResponse({'result': _('You now own %d of this set') % request.user.sets_owned.filter(owned_set=_set).count()}) class SetView(DetailView): model = Set template_name = 'brixdb/set_detail.html' context_object_name = 'set' slug_field = 'number' def get_context_data(self, object): context = super(SetView, self).get_context_data(object=object) context['inventory'] = object.inventory.select_related('element', 'element__part', 'element__colour') return context ```
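A hedged sketch of calling the Bricks & Pieces service helpers above outside of Django; the element id is made up, and the stand-in object only mimics the `lego_ids` attribute that the function reads.

```python
# Illustrative only: duck-type the Element model and query the live B&P endpoint.
from types import SimpleNamespace

from brixdb.service import bricksnpieces

element = SimpleNamespace(lego_ids=[6143501])  # hypothetical TLG element id
print(bricksnpieces.bricksnpieces_cookies(country="no"))
print(bricksnpieces.get_element_prices(element, country="no"))
```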
{ "source": "jensadne/xhtml2pdf", "score": 3 }
#### File: xhtml2pdf/tests/test_reportlab.py ```python import unittest from xhtml2pdf import xhtml2pdf_reportlab class PTCycleTest(unittest.TestCase): def test_init(self): xhtml2pdf_reportlab.PTCycle() def test_cyclicIterator(self): ptcycle = xhtml2pdf_reportlab.PTCycle() ptcycle.extend(range(10)) for ele in ptcycle: pass class PmlMaxHeightMixInTest(unittest.TestCase): def test_setMaxHeight_height_lt_70000(self): pmlmaxheightmixin = xhtml2pdf_reportlab.PmlMaxHeightMixIn() pmlmaxheightmixin.setMaxHeight(69999) # def test_setMaxHeight_height_lt_70000_and_canv(self): # pmlmaxheightmixin = xhtml2pdf_reportlab.PmlMaxHeightMixIn() # pmlmaxheightmixin.setMaxHeight(69999) # def test_setMaxHeight_height_lt_70000_and_canv_with_height(self): # pmlmaxheightmixin = xhtml2pdf_reportlab.PmlMaxHeightMixIn() # pmlmaxheightmixin.setMaxHeight(69999) def test_setMaxHeight_height_gte_70000(self): pmlmaxheightmixin = xhtml2pdf_reportlab.PmlMaxHeightMixIn() pmlmaxheightmixin.setMaxHeight(70000) def test_getMaxHeight(self): pmlmaxheightmixin = xhtml2pdf_reportlab.PmlMaxHeightMixIn() self.assertEqual(0, pmlmaxheightmixin.getMaxHeight()) pmlmaxheightmixin.availHeightValue = 42 self.assertEqual(42, pmlmaxheightmixin.getMaxHeight()) if __name__ == "__main__": unittest.main() ``` #### File: xhtml2pdf/tests/test_tags.py ```python import unittest from xml.dom import minidom from xhtml2pdf import tags from xhtml2pdf.parser import AttrContainer class PisaTagTestCase(unittest.TestCase): def test_pisa_tag_will_set_attrs_on_init(self): dom = minidom.parseString("<unit>test</unit>") element = dom.getElementsByTagName("unit")[0] attrs = AttrContainer({}) instance = tags.pisaTag(element, attrs) self.assertEqual(instance.node, element) self.assertEqual(instance.tag, "unit") self.assertEqual(instance.attr, {}) ```
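The test modules above each end with `unittest.main()`; below is a sketch of running the whole suite programmatically from the repository root, assuming the tests live in a `tests/` directory as the file paths suggest.

```python
# Discover and run every test_*.py module under tests/ (path is an assumption).
import unittest

suite = unittest.defaultTestLoader.discover("tests", pattern="test_*.py")
unittest.TextTestRunner(verbosity=2).run(suite)
```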
{ "source": "jensakut/CarND-Capstone", "score": 3 }
#### File: src/waypoint_updater/waypoint_updater.py ```python import rospy from geometry_msgs.msg import PoseStamped from styx_msgs.msg import Lane, Waypoint from std_msgs.msg import Int32 from geometry_msgs.msg import TwistStamped import math import copy ''' This node will publish waypoints from the car's current position to some `x` distance ahead. As mentioned in the doc, you should ideally first implement a version which does not care about traffic lights or obstacles. Once you have created dbw_node, you will update this node to use the status of traffic lights too. Please note that our simulator also provides the exact location of traffic lights and their current status in `/vehicle/traffic_lights` message. You can use this message to build this node as well as to verify your TL classifier. TODO (for Yousuf and Aaron): Stopline location for each traffic light. ''' LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number class WaypointUpdater(object): """This class takes all track waypoints, current car position and generate next LOOKAHEAD_WPS number waypoints""" def __init__(self): rospy.init_node('waypoint_updater') rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb) rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb) rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb) rospy.Subscriber('/current_velocity', TwistStamped, self.cVelocity_cb) self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1) self.close_waypointN_pub = rospy.Publisher('close_waypoint_n', Int32, queue_size=1) self.allWPs = None # List for all track waypoints self.finalWPS = Lane() # Prepare message for publication self.finalWPS.header.seq = 0 self.finalWPS.header.frame_id = "/world" self.trafficWPStopN = -1 self.brakeDist = 30. self.traficLineStopDist = 3. 
self.closesetPointN = None rospy.spin() def pose_cb(self, msg): """Callback for '/current_pose' message receive, also publishes 'final_waypoints' message """ if not self.allWPs: return # Find closest waypoint near the car self.closesetPointN = self.findClosestWaypoint(msg.pose.position, self.closesetPointN) # Detect if we have already passed this point and choose next point in this case if self.closesetPointN + 1 < len(self.allWPs): # if this waypoint is not the last one v_path_x = self.allWPs[self.closesetPointN + 1].pose.pose.position.x - \ self.allWPs[self.closesetPointN].pose.pose.position.x v_path_y = self.allWPs[self.closesetPointN + 1].pose.pose.position.y - \ self.allWPs[self.closesetPointN].pose.pose.position.y c_path_x = msg.pose.position.x - self.allWPs[self.closesetPointN].pose.pose.position.x c_path_y = msg.pose.position.y - self.allWPs[self.closesetPointN].pose.pose.position.y # if dot product is more than zero we choose next waypoint: dp = v_path_x*c_path_x + v_path_y*c_path_y if dp > 0: self.closesetPointN = self.closesetPointN + 1 # Update final waypoints message self.finalWPS.header.seq += 1 self.finalWPS.header.stamp = rospy.rostime.Time().now() self.finalWPS.waypoints = self.allWPs[self.closesetPointN : min(self.closesetPointN+LOOKAHEAD_WPS, len(self.allWPs))] # if there are traffic light ahead we turn on speed control if self.trafficWPStopN >= self.closesetPointN: for wpn in range(len(self.finalWPS.waypoints)-1, -1, -1): if wpn + self.closesetPointN >= self.trafficWPStopN: self.finalWPS.waypoints[wpn].twist.twist.linear.x = 0 # set speed of points after stop line to 0 else: distanceToStopLine = self.dists[self.trafficWPStopN] - self.dists[wpn + self.closesetPointN] # set speed of few points before stop line to 0 if distanceToStopLine < self.traficLineStopDist: self.finalWPS.waypoints[wpn].twist.twist.linear.x = 0 # set speed of points farther than self.brakeDist + self.traficLineStopDist to original value elif distanceToStopLine > self.brakeDist + self.traficLineStopDist : self.finalWPS.waypoints[wpn].twist.twist.linear.x = self.originalSpeeds[wpn+self.closesetPointN] # Set other points speed proportional to distance to stop point else: self.finalWPS.waypoints[wpn].twist.twist.linear.x = \ self.originalSpeeds[wpn+self.closesetPointN] * (distanceToStopLine - self.traficLineStopDist) / self.brakeDist # if there are no traffic light ahead we restore original speed value else: for wpn in range(len(self.finalWPS.waypoints)): self.finalWPS.waypoints[wpn].twist.twist.linear.x = self.originalSpeeds[wpn+self.closesetPointN] self.final_waypoints_pub.publish(self.finalWPS) self.close_waypointN_pub.publish(self.closesetPointN) def waypoints_cb(self, waypoints): """Callback for '/base_waypoints' messages.""" # Copy original speeds self.originalSpeeds = [wp.twist.twist.linear.x for wp in waypoints.waypoints] # Calculate cumulative distances for all points self.dists = [0] for wpn in range(1, len(waypoints.waypoints)): self.dists.append(self.dists[-1] + self.length(waypoints.waypoints[wpn].pose.pose.position, waypoints.waypoints[wpn-1].pose.pose.position)) self.allWPs = waypoints.waypoints def traffic_cb(self, msg): self.trafficWPStopN = msg.data pass def cVelocity_cb(self, twistStamped): self.curentSpeed = twistStamped.twist.linear.x # def obstacle_cb(self, msg): # # TODO: Callback for /obstacle_waypoint message. 
We will implement it later # pass # def get_waypoint_velocity(self, waypoint): # return waypoint.twist.twist.linear.x # def set_waypoint_velocity(self, waypoints, waypoint, velocity): # waypoints[waypoint].twist.twist.linear.x = velocity def length(self, a, b): """ Calculate distance between two points.""" return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2) # def distance(self, waypoints, wp1, wp2): # dist = 0 # for i in range(wp1, wp2+1): # dist += self.length(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position) # wp1 = i # return dist def findClosestWaypoint(self, currentPosition, previousWayPointN = None): closesetPointDist = float("inf") closesetPointN = -1 if previousWayPointN is None: startSearchWayPointN = 0 else: startSearchWayPointN = previousWayPointN # Look for closest waypoint starting from previous closest waypoint upto ... for wpn in range(startSearchWayPointN, len(self.allWPs)): d = self.length(self.allWPs[wpn].pose.pose.position, currentPosition) if d < closesetPointDist: closesetPointDist = d closesetPointN = wpn elif previousWayPointN is not None: # ... upto the moment when distance starts to increase or till the end # rospy.loginfo("n: {}, x1: {}, y1: {}, xc: {}, yc: {}"\ # .format(closesetPointN, self.allWPs[closesetPointN].pose.pose.position.x, \ # self.allWPs[closesetPointN].pose.pose.position.y, \ # msg.pose.position.x, msg.pose.position.y)) return closesetPointN return closesetPointN if __name__ == '__main__': try: WaypointUpdater() except rospy.ROSInterruptException: rospy.logerr('Could not start waypoint updater node.') ```
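A pure-Python sketch of the dot-product test used in `pose_cb` above to decide whether the car has already passed the closest waypoint; the coordinates are made up.

```python
# Project the car's offset from the closest waypoint onto the path direction;
# a positive dot product means the car is past it, so the next waypoint is used.
closest = (10.0, 5.0)   # closest waypoint
nxt = (11.0, 5.0)       # following waypoint
car = (10.4, 5.1)       # current pose

v_path = (nxt[0] - closest[0], nxt[1] - closest[1])
c_path = (car[0] - closest[0], car[1] - closest[1])
dot = v_path[0] * c_path[0] + v_path[1] * c_path[1]
print(dot > 0)          # True: advance to the next waypoint
```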
{ "source": "jensakut/Reinforced_Bananas_DQN", "score": 3 }
#### File: Reinforced_Bananas_DQN/dqn/watch.py ```python from collections import deque import numpy as np import torch # get arguments from unityagents import UnityEnvironment from .Agent import Agent from .arguments import get_args def dqn(agent, args): """Deep Q-Learning. Args args: defined in arguments.py """ ############################## # init ############################# scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores # have some plotting object that captures the scores eps = args.eps_start # initialize epsilon ############################# # run the episodes ############################# for i_episode in range(1, args.n_episodes + 1): env_info = env.reset(train_mode=args.train)[brain_name] state = env_info.vector_observations[0] score = 0 for t in range(args.max_t): # always greedy, show the best :-) action = agent.act(state, 0.0) env_info = env.step(action)[brain_name] next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished eps = max(args.eps_end, args.eps_decay * eps) # decrease epsilon scores_window.append(score) break print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}'.format(i_episode, np.mean(scores_window), score), end="") args = get_args() args.train = False args.n_episodes = 100 # initialize the environment env = UnityEnvironment(file_name=args.sim_dir) # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # reset the environment env_info = env.reset(train_mode=args.train)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size) agent = Agent(args, state_size=state_size, action_size=action_size, filename='') # load best weights agent.qnetwork_local.load_state_dict(torch.load('local_network_best_15_400_episodes.pth')) dqn(agent, args) ```
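A hedged sketch of the checkpoint round-trip this script relies on: training saves `qnetwork_local`'s state dict and `watch.py` reloads it before running greedy episodes. The `nn.Linear` layer is only a stand-in for the real Q-network (37-dimensional Banana observation, 4 actions), and the filename is a placeholder.

```python
# Save/load pattern assumed by watch.py; network and filename are placeholders.
import torch
import torch.nn as nn

qnetwork_local = nn.Linear(37, 4)
torch.save(qnetwork_local.state_dict(), "local_network_best.pth")
qnetwork_local.load_state_dict(torch.load("local_network_best.pth"))
```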
{ "source": "jensakut/Reinforced-Reacher-MADDPG", "score": 2 }
#### File: Reinforced-Reacher-MADDPG/maddpg/train.py ```python import time from collections import deque from itertools import count import numpy as np import torch from unityagents import UnityEnvironment from maddpg.ddpg_agent import Agent from maddpg.parameters import ParReacher from maddpg.plotting import Plotting def ddpg_unity(env, agent, brain_name, num_agents, plotting, par, n_episodes=200): """Train DDPG Agent Params ====== env (object): Unity environment instance agent (DDPGMultiAgent): agent instance brain_name (string): name of brain num_agents (int): number of agents plotting (Plotting): object to plot n_episodes (int): number of episodes to train the network """ # for naming the weights experiment_name = plotting.fname not_solved = True scores_deque = deque(maxlen=100) scores = [] best_score = -np.Inf for i_episode in range(1, n_episodes + 1): env_info = env.reset(train_mode=True)[brain_name] # reset the environment states = env_info.vector_observations # get the current state (for each agent) score = np.zeros(num_agents) # initialize the score (for each agent) t_s = time.time() for timestep in count(): actions = agent.act(states) env_info = env.step(actions)[brain_name] # send all actions to tne environment next_states = env_info.vector_observations # get next state (for each agent) rewards = env_info.rewards # get reward (for each agent) dones = env_info.local_done # see if episode finished score += env_info.rewards # update the score (for each agent) agent.step(states, actions, rewards, next_states, dones, timestep) states = next_states # roll over states to next time step if np.any(dones): # exit loop if episode finished plotting.add_measurement(score_per_agent=score, eps=agent.epsilon) break ctime = time.time() - t_s scores_deque.append(np.mean(score)) scores.append(score) best_score = max(best_score, np.mean(scores_deque)) print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}, Min Score: {:.2f}, Max Score: {:.2f}, ' 'episode_dur {:.2f}'.format(i_episode, np.mean(scores_deque), np.mean(score), np.min(score), np.max(score), ctime), end="") if i_episode % 5 == 0: save_network_weights(agent, experiment_name, i_episode) plotting.plotting(par) print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}, Min Score: {:.2f}, Max Score: {:.2f}, ' 'episode_dur {:.2f}'.format( i_episode, np.mean(scores_deque), np.mean(score), np.min(score), np.max(score), ctime)) if np.mean(scores_deque) >= 30.0 and not_solved: print("environment solved in {}".format(i_episode)) not_solved = False # save at last plotting.plotting(par) save_network_weights(agent, experiment_name, par.num_episodes) return scores def save_network_weights(agent, experiment_name, i_episode): torch.save(agent.actor_local.state_dict(), experiment_name + '_checkpoint_actor_' + str(i_episode) + '.pth') torch.save(agent.critic_local.state_dict(), experiment_name + '_checkpoint_critic_' + str(i_episode) + '.pth') def train_in_unity_env(): # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # reset the environment env_info = env.reset(train_mode=False)[brain_name] # number of agents num_agents = len(env_info.agents) # size of each action action_size = brain.vector_action_space_size # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. 
Each observes a state with length: {} and has {} actions.'.format(states.shape[0], state_size, action_size)) plotting = Plotting(par) agent = Agent(state_size, action_size, par) ddpg_unity(env, agent, brain_name, num_agents, plotting, par, n_episodes=200) env.close() # main function if __name__ == "__main__": # Get parameters for ddpg from config object # par = ParCrawler() par = ParReacher() env = UnityEnvironment(file_name=par.file_name) # load and train within the unity environment train_in_unity_env() ```
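A stand-in sketch of the score bookkeeping in `ddpg_unity` above: per-agent rewards are accumulated over an episode, their mean is pushed into a 100-episode window, and the running average decides when the task counts as solved (mean of at least 30).

```python
# Toy numbers only; the random draws stand in for env_info.rewards.
from collections import deque

import numpy as np

num_agents = 20
scores_window = deque(maxlen=100)
for episode in range(3):
    score = np.zeros(num_agents)
    for _ in range(5):
        score += np.random.uniform(0.0, 0.1, num_agents)
    scores_window.append(np.mean(score))
print(np.mean(scores_window) >= 30.0)  # False for this toy run
```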
{ "source": "jensanjo/QuadrupedRobot", "score": 3 }
#### File: Example/display/demo.py
```python
import os
import sys
from PIL import Image
sys.path.append("/home/ubuntu/Robotics/QuadrupedRobot")
sys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk("/home/ubuntu/Robotics/QuadrupedRobot") for name in dirs])

from Mangdang.LCD.ST7789 import ST7789

def main():
    """ Demo that shows a picture on the LCD. """
    # init st7789 device
    disp = ST7789()
    disp.begin()
    disp.clear()

    # show example picture (Image.resize returns a new image, so assign the result)
    image = Image.open("./dog.png")
    image = image.resize((320, 240))
    disp.display(image)

main()
```
#### File: StanfordQuadruped/src/ActuatorControl.py
```python
import os
import sys
import time

class ActuatorControl:
    def __init__(self, pwm_number):
        self.pwm_number = pwm_number

    def updateDutyCycle(self, angle):
        # Map the servo angle to a pulse width for the sysfs PWM node
        duty_cycle = int((1.11 * angle + 50) * 10000)
        return duty_cycle

    def updateActuatorAngle(self, angle):
        if self.pwm_number == 1:
            actuator_name = 'pwm1'
        elif self.pwm_number == 2:
            actuator_name = 'pwm2'
        elif self.pwm_number == 3:
            actuator_name = 'pwm3'
        else:
            raise ValueError("pwm_number must be 1, 2 or 3")
        duty_cycle = self.updateDutyCycle(angle)
        file_node = '/sys/class/pwm/pwmchip0/' + actuator_name + '/duty_cycle'
        with open(file_node, "w") as f:
            f.write(str(duty_cycle))

#test = ActuatorControl(3)
#time.sleep(10)
#for index in range(30):
#    test.updateActuatorAngle(index*3)
#    time.sleep(0.1)
#    test.updateActuatorAngle(0)
```
#### File: StanfordQuadruped/src/Command.py
```python
import numpy as np

class Command:
    """Stores movement command
    """

    def __init__(self):
        self.horizontal_velocity = np.array([0, 0])
        self.yaw_rate = 0.0
        self.height = -0.07
        self.pitch = 0.0
        self.roll = 0.0
        self.activation = 0
        self.hop_event = False
        self.trot_event = False
        self.activate_event = False
        self.dance_activate_event = False
        self.dance_switch_event = False
        self.gait_switch_event = False
        self.shutdown_signal = False
```
#### File: StanfordQuadruped/src/Utilities.py
```python
import numpy as np

def deadband(value, band_radius):
    return max(value - band_radius, 0) + min(value + band_radius, 0)


def clipped_first_order_filter(input, target, max_rate, tau):
    rate = (target - input) / tau
    return np.clip(rate, -max_rate, max_rate)
```
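The duty-cycle arithmetic from `ActuatorControl.updateDutyCycle` above, shown as a plain function so it can be checked without the sysfs PWM nodes being present; the values match the nanosecond units of the Linux sysfs PWM interface (roughly 0.5-1.5 ms servo pulses).

```python
# Same formula as updateDutyCycle, evaluated for a few sample angles.
def duty_cycle_ns(angle):
    return int((1.11 * angle + 50) * 10000)

for angle in (0, 45, 90):
    print(angle, duty_cycle_ns(angle))  # 0 -> 500000, 45 -> 999500, 90 -> 1499000
```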
{ "source": "JensAstrup/pyEchosign", "score": 3 }
#### File: pyEchosign/classes/documents.py ```python import logging from io import IOBase, FileIO, BytesIO from typing import TYPE_CHECKING, Union import arrow import requests from pyEchosign.exceptions.internal import ApiError from pyEchosign.utils.handle_response import check_error, response_success from pyEchosign.utils.request_parameters import get_headers log = logging.getLogger('pyEchosign.' + __name__) if TYPE_CHECKING: from .account import EchosignAccount __all__ = ['TransientDocument'] class TransientDocument(object): """ A document which can be used in Agreements - is deleted by Echosign after 7 days. The TransientDocument is created in Echosign on instantiation. Args: account: The :class:`EchosignAccount <pyEchosign.classes.account.EchosignAccount>` to be associated with this document file_name (str): The name of the file file: The actual file object to upload to Echosign, accepts a stream of bytes. mime_type: (optional) The MIME type of the file. Echosign will infer the type from the file extension if not provided. Attributes: file_name: The name of the file file: The actual file object to upload to Echosign mime_type: The MIME type of the file document_id: The ID provided by Echosign, used to reference it in creating agreements expiration_date: The date Echosign will delete this document (not provided by Echosign, calculated for convenience) """ def __init__(self, account, file_name, file, mime_type=None): # type: (EchosignAccount, str, Union[IOBase, FileIO, BytesIO], str) -> None self.file_name = file_name self.file = file self.mime_type = mime_type self.document_id = None self.expiration_date = None # With file data provided, make request to Echosign API for transient document url = account.api_access_point + 'transientDocuments' # Create post_data file_tuple = (file_name, file) # Only add the mime type if provided if mime_type is not None: file_tuple = file_tuple + (mime_type, ) files = dict(File=file_tuple) r = requests.post(url, headers=get_headers(account.access_token, content_type=None), files=files) if response_success(r): log.debug('Request to create document {} successful.'.format(self.file_name)) response_data = r.json() self.document_id = response_data.get('transientDocumentId', None) # If there was no document ID, something went wrong if self.document_id is None: log.error('Did not receive a transientDocumentId from Echosign. Received: {}'.format(r.content)) raise ApiError('Did not receive a Transient Document ID from Echosign') else: today = arrow.now() # Document will expire in 7 days from creation self.expiration_date = today.replace(days=+7).datetime else: try: log.error('Error encountered creating document {}. Received message: {}'. format(self.file_name, r.content)) finally: check_error(r) def __str__(self): return self.file_name class AgreementDocument(object): """ Represents a document used in an Agreement. 
Attributes: echosign_id: The ID of the Document which can be used to retrieve its file stream mime_type: The MIME type of the document name: The name of the document page_count: The number of pages in the document supporting_document: Whether or not this document is a "supporting document" as specified by the API field_name: If a supporting document, what the name is of the supporting document field """ def __init__(self, echosign_id, mime_type, name, page_count, supporting_document=False, field_name=None): # type: (str, str, str, int, bool, str) -> None self.echosign_id = echosign_id self.mime_type = mime_type self.name = name self.page_count = page_count self.supporting_document = supporting_document self.field_name = field_name def __str__(self): return 'AgreementDocument: {}'.format(self.name) def __repr__(self): return 'AgreementDocument: {}'.format(self.name) ```
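A hedged usage sketch for `TransientDocument` above; the access token, the PDF path, and the assumption that `EchosignAccount` takes the token as its first argument are all placeholders for illustration.

```python
# Illustrative only: upload a local PDF as a transient document (expires in 7 days).
from pyEchosign.classes.account import EchosignAccount
from pyEchosign.classes.documents import TransientDocument

account = EchosignAccount("access_token_here")  # assumption about the constructor
with open("contract.pdf", "rb") as pdf:
    document = TransientDocument(account, "contract.pdf", pdf, mime_type="application/pdf")
print(document.document_id, document.expiration_date)
```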
{ "source": "JensAstrup/pyOutlook", "score": 3 }
#### File: pyOutlook/core/folder.py ```python import requests from pyOutlook.internal.utils import check_response __all__ = ['Folder'] class Folder(object): """An object representing a Folder in the OutlookAccount provided. Attributes: account: The :class:`OutlookAccount <pyOutlook.core.main.OutlookAccount>` this folder should be associated with id: The static id generated by Outlook to identify this folder. folder_name: The name of this folder as displayed in the account parent_id: The id of the folder which houses this Folder object child_folder_count: The number of child folders inside this Folder unread_count: The number of unread messages inside this Folder total_items: A sum of all items inside Folder """ def __init__(self, account, folder_id, folder_name, parent_id, child_folder_count, unread_count, total_items): self.account = account self.parent_id = parent_id self.child_folder_count = child_folder_count self.unread_count = unread_count self.total_items = total_items self.name = folder_name self.id = folder_id def __str__(self): return self.name def __repr__(self): return str(self) @property def headers(self): return {"Authorization": "Bearer " + self.account.access_token, "Content-Type": "application/json"} @classmethod def _json_to_folder(cls, account, json_value): return Folder(account, json_value['Id'], json_value['DisplayName'], json_value['ParentFolderId'], json_value['ChildFolderCount'], json_value['UnreadItemCount'], json_value['TotalItemCount']) @classmethod def _json_to_folders(cls, account, json_value): return [cls._json_to_folder(account, folder) for folder in json_value['value']] def rename(self, new_folder_name): """Renames the Folder to the provided name. Args: new_folder_name: A string of the replacement name. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. Returns: A new Folder representing the folder with the new name on Outlook. """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id payload = '{ "DisplayName": "' + new_folder_name + '"}' r = requests.patch(endpoint, headers=headers, data=payload) if check_response(r): return_folder = r.json() return self._json_to_folder(self.account, return_folder) def get_subfolders(self): """Retrieve all child Folders inside of this Folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. Returns: List[:class:`Folder <pyOutlook.core.folder.Folder>`] """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/childfolders' r = requests.get(endpoint, headers=headers) if check_response(r): return self._json_to_folders(self.account, r.json()) def delete(self): """Deletes this Folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id r = requests.delete(endpoint, headers=headers) check_response(r) def move_into(self, destination_folder): # type: (Folder) -> Folder """Move the Folder into a different folder. This makes the Folder provided a child folder of the destination_folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. 
Args: destination_folder: A :class:`Folder <pyOutlook.core.folder.Folder>` that should become the parent Returns: A new :class:`Folder <pyOutlook.core.folder.Folder>` that is now inside of the destination_folder. """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/move' payload = '{ "DestinationId": "' + destination_folder.id + '"}' r = requests.post(endpoint, headers=headers, data=payload) if check_response(r): return_folder = r.json() return self._json_to_folder(self.account, return_folder) def copy_into(self, destination_folder): # type: (Folder) -> Folder """Copies the Folder into the provided destination folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. Args: destination_folder: The Folder that this Folder should be copied to. Returns: A new :class:`Folder <pyOutlook.core.folder.Folder>` representing the newly created folder. """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/copy' payload = '{ "DestinationId": "' + destination_folder.id + '"}' r = requests.post(endpoint, headers=headers, data=payload) if check_response(r): return_folder = r.json() return self._json_to_folder(self.account, return_folder) def create_child_folder(self, folder_name): """Creates a child folder within the Folder it is called from and returns the new Folder object. Args: folder_name: The name of the folder to create Returns: :class:`Folder <pyOutlook.core.folder.Folder>` """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/childfolders' payload = '{ "DisplayName": "' + folder_name + '"}' r = requests.post(endpoint, headers=headers, data=payload) if check_response(r): return_folder = r.json() return self._json_to_folder(self.account, return_folder) def messages(self): """ Retrieves the messages in this Folder, returning a list of :class:`Messages <pyOutlook.core.message.Message>`.""" headers = self.headers r = requests.get('https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id + '/messages', headers=headers) check_response(r) from pyOutlook.core.message import Message return Message._json_to_messages(self.account, r.json()) ``` #### File: pyOutlook/tests/message_tests.py ```python import base64 from unittest import TestCase try: from unittest.mock import patch, Mock except ImportError: from mock import Mock, patch from pyOutlook import OutlookAccount from pyOutlook.core.contact import Contact from pyOutlook.core.message import Message from tests.utils import sample_message class TestMessage(TestCase): @classmethod def setUpClass(cls): cls.mock_get_patcher = patch('pyOutlook.core.message.requests.get') cls.mock_get = cls.mock_get_patcher.start() cls.mock_patch_patcher = patch('pyOutlook.core.message.requests.patch') cls.mock_patch = cls.mock_patch_patcher.start() cls.mock_post_patcher = patch('pyOutlook.core.message.requests.post') cls.mock_post = cls.mock_post_patcher.start() cls.account = OutlookAccount('token') def test_json_to_message_format(self): """ Test that JSON is turned into a Message correctly """ mock_response = Mock() mock_response.json.return_value = sample_message mock_response.status_code = 200 self.mock_get.return_value = mock_response account = OutlookAccount('token') message = Message._json_to_message(account, sample_message) self.assertEqual(message.subject, 'Re: Meeting Notes') sender = Contact('<EMAIL>', '<NAME>') self.assertIsInstance(message.sender, 
Contact) self.assertEqual(message.sender.email, sender.email) self.assertEqual(message.sender.name, sender.name) def test_recipients_missing_json(self): """ Test that a response with no ToRecipients does not cause Message deserialization to fail """ json_message = { "Id": "AAMkAGI2THVSAAA=", "CreatedDateTime": "2014-10-20T00:41:57Z", "LastModifiedDateTime": "2014-10-20T00:41:57Z", "ReceivedDateTime": "2014-10-20T00:41:57Z", "SentDateTime": "2014-10-20T00:41:53Z", "Subject": "Re: Meeting Notes", "Body": { "ContentType": "Text", "Content": "\n\nFrom: Alex D\nSent: Sunday, October 19, 2014 5:28 PM\nTo: <NAME>\nSubject: " "Meeting Notes\n\nPlease send me the meeting notes ASAP\n" }, "BodyPreview": "\nFrom: Alex D\nSent: Sunday, October 19, 2014 5:28 PM\nTo: <NAME>\n" "Subject: Meeting Notes\n\nPlease send me the meeting notes ASAP", "Sender": { "EmailAddress": { "Name": "<NAME>", "Address": "<EMAIL>" } }, "From": { "EmailAddress": { "Name": "<NAME>", "Address": "<EMAIL>" } }, "CcRecipients": [], "BccRecipients": [], "ReplyTo": [], "ConversationId": "AAQkAGI2yEto=", "IsRead": False, 'HasAttachments': True } Message._json_to_message(self.account, json_message) def test_is_read_status(self): """ Test that the correct value is returned after changing the is_read status """ mock_patch = Mock() mock_patch.status_code = 200 self.mock_patch.return_value = mock_patch message = Message(self.account, 'test body', 'test subject', [], is_read=False) message.is_read = True self.assertTrue(message.is_read) def test_attachments_added(self): """ Test that attachments are added to Message in the correct format """ message = Message(self.account, '', '', []) message.attach('abc', 'Test/Attachment.csv') message.attach(b'some bytes', 'attached.pdf') self.assertEqual(len(message._attachments), 2) file_bytes = [attachment._content for attachment in message._attachments] file_names = [attachment.name for attachment in message._attachments] # The files are base64'd for the API some_bytes = base64.b64encode(b'some bytes') abc = base64.b64encode(b'abc') self.assertIn(some_bytes.decode('UTF-8'), file_bytes) self.assertIn(abc.decode('UTF-8'), file_bytes) self.assertIn('TestAttachment.csv', file_names) def test_message_sent_with_string_recipients(self): """ A list of strings or Contacts can be provided as the To/CC/BCC recipients """ mock_post = Mock() mock_post.status_code = 200 self.mock_post.return_value = mock_post message = Message(self.account, '', '', ['<EMAIL>']) message.send() def test_message_sent_with_contact_recipients(self): """ A list of strings or Contacts can be provided as the To/CC/BCC recipients """ mock_post = Mock() mock_post.status_code = 200 self.mock_post.return_value = mock_post message = Message(self.account, '', '', [Contact('<EMAIL>')]) message.send() def test_category_added(self): """ Test that Message.categories is updated in addition to the API call made """ mock_patch = Mock() mock_patch.status_code = 200 self.mock_patch.return_value = mock_patch message = Message(self.account, 'test body', 'test subject', [], categories=['A']) message.add_category('B') self.assertIn('A', message.categories) self.assertIn('B', message.categories) ```
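A quick sanity check of the Folder model above can be done without touching the network: `_json_to_folder` simply maps an Outlook REST v2.0 MailFolder payload onto the constructor. A minimal sketch, assuming pyOutlook is installed; the payload values and the access token below are placeholders, not real data:

```python
from pyOutlook import OutlookAccount
from pyOutlook.core.folder import Folder

# Placeholder payload shaped like the Outlook REST v2.0 MailFolder resource
# (the keys mirror exactly what _json_to_folder reads above).
payload = {
    'Id': 'AAMkAGVmMDEz',
    'DisplayName': 'Inbox',
    'ParentFolderId': 'AAMkAGVmMDAw',
    'ChildFolderCount': 2,
    'UnreadItemCount': 5,
    'TotalItemCount': 42,
}

account = OutlookAccount('placeholder-access-token')  # no request is sent here
folder = Folder._json_to_folder(account, payload)
print(folder, folder.unread_count, folder.total_items)  # Inbox 5 42
```

The request-issuing methods (rename, move_into, copy_into, create_child_folder, messages) only need a valid access token on the account; the deserialization itself runs offline.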
{ "source": "jensb89/pyOTR", "score": 3 }
#### File: pyOTR/Src/system.py ```python
from __future__ import print_function
import platform
import re
import subprocess
import os
import logging


class SystemInfo(object):

    def __init__(self):
        (system, node, release, version, machine, processor) = platform.uname()
        self.systemOS = system
        self.node = node
        self.release = release
        self.version = version
        self.machine = machine
        self.processor = processor

        if self.systemOS=="Linux":
            import pwd
            self.user = pwd.getpwuid(os.getuid()).pw_name
        else:
            self.user = os.getlogin()

        self.logger = logging.getLogger('pyOTR.system')

    def printSystemInfos(self):
        self.logger.info("User:\t\t%s" % self.user)
        self.logger.info("System OS:\t%s" % self.systemOS)
        self.logger.info("Name:\t\t%s" % self.node)
        self.logger.info("Machine:\t%s" % self.machine)
        self.logger.info("Version:\t%s" % self.version)
        self.logger.info("RAM:")
        self.printRam()

    def printRam(self):
        if self.systemOS == "Darwin":
            self.printRamDarwin()
        elif self.systemOS == "Linux":
            self.printRamLinux()

    def printRamLinux(self,indentTabs=2):
        # decode() so the str regex below also works on Python 3, where check_output returns bytes
        output = subprocess.check_output(["free", "-m"]).decode()
        p = re.compile('Mem:\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)')
        (total,used,free,shared,buff,available) = p.findall(output)[0]
        self.logger.info(indentTabs*"\t" + 'Total Memory:\t\t%s MB' % total)
        self.logger.info(indentTabs*"\t" + 'Used Memory:\t\t%s MB' % used)
        self.logger.info(indentTabs*"\t" + 'Free Memory:\t\t%s MB' % free)
        self.logger.info(indentTabs*"\t" + 'Shared Memory:\t\t%s MB' % shared)
        self.logger.info(indentTabs*"\t" + 'Available Memory:\t%s MB' % available)

    def printRamDarwin(self,indentTabs=2):
        # RAM under macOS
        # https://apple.stackexchange.com/questions/4286/is-there-a-mac-os-x-terminal-version-of-the-free-command-in-linux-systems
        # Get process info
        ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0].decode()
        vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0].decode()

        # Iterate processes
        processLines = ps.split('\n')
        sep = re.compile('[\s]+')
        rssTotal = 0 # kB
        for row in range(1,len(processLines)):
            rowText = processLines[row].strip()
            rowElements = sep.split(rowText)
            try:
                rss = float(rowElements[0]) * 1024
            except:
                rss = 0 # ignore...
            rssTotal += rss

        # Process vm_stat
        vmLines = vm.split('\n')
        sep = re.compile(':[\s]+')
        vmStats = {}
        for row in range(1,len(vmLines)-2):
            rowText = vmLines[row].strip()
            rowElements = sep.split(rowText)
            vmStats[(rowElements[0])] = int(rowElements[1].strip('\.')) * 4096

        self.logger.info(indentTabs*"\t" + 'Wired Memory:\t\t%d MB' % ( vmStats["Pages wired down"]/1024/1024 ))
        self.logger.info(indentTabs*"\t" + 'Active Memory:\t\t%d MB' % ( vmStats["Pages active"]/1024/1024 ))
        self.logger.info(indentTabs*"\t" + 'Inactive Memory:\t%d MB' % ( vmStats["Pages inactive"]/1024/1024 ))
        self.logger.info(indentTabs*"\t" + 'Free Memory:\t\t%d MB' % ( vmStats["Pages free"]/1024/1024 ))
        self.logger.info(indentTabs*"\t" + 'Real Mem Total (ps):\t%.3f MB' % ( rssTotal/1024/1024 ))

#test = SystemInfo()
#test.printSystemInfos()
```
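Because SystemInfo reports everything through `logging.getLogger('pyOTR.system')`, nothing appears on the console until logging is configured. A minimal usage sketch; the import path assumes pyOTR/Src is on sys.path:

```python
import logging

from system import SystemInfo  # assumes pyOTR/Src is on sys.path

# Without a handler, the logger.info() calls above are silently dropped,
# so configure basic INFO-level logging first.
logging.basicConfig(level=logging.INFO, format='%(name)s: %(message)s')

info = SystemInfo()
info.printSystemInfos()  # prints user, OS, machine details and the RAM summary
```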
{ "source": "jensbjorgensen/libnacl", "score": 3 }
#### File: tests/unit/test_raw_random.py ```python import libnacl import unittest class TestRandomBytes(unittest.TestCase): def test_randombytes_random(self): self.assertIsInstance(libnacl.randombytes_random(), int) def test_randombytes_uniform(self): self.assertIsInstance(libnacl.randombytes_uniform(200), int) freq = {libnacl.randombytes_uniform(256): 1 for _ in range(65536)} self.assertEqual(256, len(freq)) self.assertTrue(all(freq.values())) def test_randombytes(self): 'copied from libsodium default/randombytes.c' data = libnacl.randombytes(65536) freq = {x: 1 for x in data} self.assertEqual(256, len(freq)) self.assertTrue(all(freq.values())) def test_randombytes_buf_deterministic(self): seed = libnacl.randombytes_buf(32) seed2 = libnacl.randombytes_buf(32) data = libnacl.randombytes_buf_deterministic(32, seed) data2 = libnacl.randombytes_buf_deterministic(32, seed) data3 = libnacl.randombytes_buf_deterministic(32, seed2) self.assertEqual(32, len(data)) self.assertEqual(32, len(data)) self.assertEqual(32, len(data)) self.assertEqual(data, data2) self.assertNotEqual(data, data3) def test_crypto_kdf_keygen(self): master_key = libnacl.crypto_kdf_keygen() freq = {x: 1 for x in master_key} self.assertEqual(32, len(master_key)) self.assertTrue(all(freq.values())) def test_crypto_kdf_derive_from_key(self): master_key = libnacl.crypto_kdf_keygen() subkey = libnacl.crypto_kdf_derive_from_key(16, 1, "Examples", master_key) subkey2 = libnacl.crypto_kdf_derive_from_key(16, 1, "Examples", master_key) subkey3 = libnacl.crypto_kdf_derive_from_key(16, 2, "Examples", master_key) self.assertEqual(16, len(subkey)) self.assertEqual(16, len(subkey2)) self.assertEqual(16, len(subkey3)) self.assertEqual(subkey, subkey2) self.assertNotEqual(subkey, subkey3) def test_crypto_kx_keypair(self): pk, sk = libnacl.crypto_kx_keypair() self.assertEqual(32, len(pk)) self.assertEqual(32, len(sk)) def test_crypto_kx_seed_keypair(self): seed = libnacl.randombytes_buf(32) seed2 = libnacl.randombytes_buf(32) pk, sk = libnacl.crypto_kx_seed_keypair(seed) pk2, sk2 = libnacl.crypto_kx_seed_keypair(seed) pk3, sk3 = libnacl.crypto_kx_seed_keypair(seed2) self.assertEqual(pk, pk2) self.assertNotEqual(pk, pk3) self.assertEqual(sk, sk2) self.assertNotEqual(sk, sk3) def test_crypto_kx_client_session_keys(self): client_pk, client_sk = libnacl.crypto_kx_keypair() server_pk, server_sk = libnacl.crypto_kx_keypair() rx, tx, status = libnacl.crypto_kx_client_session_keys(client_pk, client_sk, server_pk) rx2, tx2, status = libnacl.crypto_kx_client_session_keys(client_pk, client_sk, server_pk) self.assertEqual(32, len(rx)) self.assertEqual(32, len(tx)) self.assertEqual(rx, rx2) self.assertEqual(tx, tx2) def test_crypto_kx_server_session_keys(self): client_pk, client_sk = libnacl.crypto_kx_keypair() server_pk, server_sk = libnacl.crypto_kx_keypair() rx, tx, status = libnacl.crypto_kx_server_session_keys(client_pk, client_sk, server_pk) rx2, tx2, status = libnacl.crypto_kx_server_session_keys(client_pk, client_sk, server_pk) self.assertEqual(32, len(rx)) self.assertEqual(32, len(tx)) self.assertEqual(rx, rx2) self.assertEqual(tx, tx2) ``` #### File: tests/unit/test_seal.py ```python import libnacl.sealed import libnacl.public # Import python libs import unittest class TestSealed(unittest.TestCase): ''' ''' def test_secretkey(self): ''' ''' msg = b'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.' 
key = libnacl.public.SecretKey() box = libnacl.sealed.SealedBox(key) ctxt = box.encrypt(msg) self.assertNotEqual(msg, ctxt) bclear = box.decrypt(ctxt) self.assertEqual(msg, bclear) def test_publickey_only(self): ''' ''' msg = b'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.' key = libnacl.public.SecretKey() key_public = libnacl.public.PublicKey(key.pk) box = libnacl.sealed.SealedBox(key_public) ctxt = box.encrypt(msg) self.assertNotEqual(msg, ctxt) decrypting_box = libnacl.sealed.SealedBox(key) bclear = decrypting_box.decrypt(ctxt) self.assertEqual(msg, bclear) ```
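Outside the test harness, the sealed-box flow exercised above looks like this: the sender only needs the recipient's public key, while decryption requires the full SecretKey. A short sketch using the same libnacl calls as the tests:

```python
import libnacl.public
import libnacl.sealed

# Recipient generates a keypair; the sender seals against the public key only.
recipient_key = libnacl.public.SecretKey()
sender_box = libnacl.sealed.SealedBox(libnacl.public.PublicKey(recipient_key.pk))

ciphertext = sender_box.encrypt(b'attack at dawn')

# Only a box built from the secret key can open the sealed message.
recipient_box = libnacl.sealed.SealedBox(recipient_key)
assert recipient_box.decrypt(ciphertext) == b'attack at dawn'
```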
{ "source": "JENScoding/DLA-Sign-Language", "score": 3 }
#### File: JENScoding/DLA-Sign-Language/data_augmentation.py ```python from sklearn.preprocessing import LabelBinarizer from sklearn.model_selection import train_test_split import pandas as pd import os from keras.preprocessing.image import ImageDataGenerator def rotation(data_x, data_y, angle, size): rotate = ImageDataGenerator(rotation_range=angle) rotate.fit(data_x) for x_rot, y_rot in rotate.flow(data_x, data_y, batch_size=size, shuffle=True): break return x_rot, y_rot def vertical_shift(data_x, data_y, range, size): vertical = ImageDataGenerator(width_shift_range=range) vertical.fit(data_x) for x_vert, y_vert in vertical.flow(data_x, data_y, batch_size=size, shuffle=True): break return x_vert, y_vert def brightness_change(data_x, data_y, range, size): brightness = ImageDataGenerator(brightness_range=range) brightness.fit(data_x) for x_bright, y_bright in brightness.flow(data_x, data_y, batch_size=size, shuffle=True): break return x_bright, y_bright ```
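The three helpers each wrap a Keras ImageDataGenerator and return one shuffled batch of augmented images with matching labels. A usage sketch with synthetic data (array shapes are illustrative; 28x28 grayscale matches the Sign Language MNIST format this repo targets, and note that `vertical_shift` actually shifts along the width axis because it uses `width_shift_range`):

```python
import numpy as np
from data_augmentation import rotation, vertical_shift, brightness_change  # module name assumed

# ImageDataGenerator.fit() expects a 4D array: (samples, height, width, channels)
x = np.random.rand(200, 28, 28, 1).astype('float32')
y = np.random.randint(0, 24, size=(200,))

x_rot, y_rot = rotation(x, y, angle=20, size=32)                 # 32 randomly rotated images
x_shift, y_shift = vertical_shift(x, y, range=0.2, size=32)      # shifted up to 20% of the width
x_bright, y_bright = brightness_change(x, y, range=[0.5, 1.5], size=32)
print(x_rot.shape, x_shift.shape, x_bright.shape)                # (32, 28, 28, 1) each
```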
{ "source": "jensdittrich/personalizedexams", "score": 3 }
#### File: jensdittrich/personalizedexams/helloworld.py ```python
import random as rand
import sys
import distutils.util as dist

def helloWorld(seed, showProblem=False, showSolution=False, outdir=''):
    # initialize the pseudo-random engine with the seed:
    rand.seed(seed)
    # this way, all calls and hence the pseudo-random numbers output by that engine will be deterministic:
    a = rand.randint(25, 40)
    b = rand.randint(15, 40)

    if showProblem:
        print("Problem: What is "+str(a)+"+"+str(b)+"?")
    if showSolution:
        print("Solution: The solution to "+str(a)+"+"+str(b)+" is "+str(a+b)+".")

if __name__ == "__main__":
    if len(sys.argv) < 4:
        raise ValueError("specify seed, showProblem and showSolution as parameters")
    seed = int(sys.argv[1])
    helloWorld(seed, dist.strtobool(sys.argv[2]), dist.strtobool(sys.argv[3]))
```
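Because the generator derives everything from the seed, the same seed always reproduces the same exercise, which is what makes per-student personalization possible (one seed per student). A small sketch, with arbitrary example seeds:

```python
# Same seed, same exercise; a different seed gives a (very likely) different one.
helloWorld(4711, showProblem=True, showSolution=True)
helloWorld(4711, showProblem=True, showSolution=True)   # identical output to the call above
helloWorld(4712, showProblem=True, showSolution=True)   # different numbers

# Equivalent command-line use, matching the __main__ block:
#   python helloworld.py 4711 True True
```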
{ "source": "jensen0914/inference-sdk", "score": 2 }
#### File: inference-sdk/tests/test_3d_segmentation.py ```python import os import json import subprocess import numpy as np from .mock_server_test_case import MockServerTestCase from .utils import term_colors class Test3DSegmentation(MockServerTestCase): input_dir = 'test_3d/' output_dir = 'test_3d_out/' command = '-s3D' test_name = '3D segmentation test' def testOutputFiles(self): input_files = os.listdir(os.path.join('tests/data', self.input_dir)) result = subprocess.run(['./send-inference-request.sh', '-s', '--host', '0.0.0.0', '-p', self.inference_port, '-o', self.output_dir, '-i', self.input_dir] + self.additional_flags.split(), cwd='inference-test-tool', stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') # Test that the command executed successfully self.check_success(result, command_name="Send inference request") self.assertEqual(result.returncode, 0) output_files = os.listdir(os.path.join(self.inference_test_dir, self.output_dir)) # Test that there is one binary mask saved self.assertTrue('output_masks_1.npy' in output_files) # Test that there was one PNG image generated for each input image output_no_index = [name[name.index('_') + 1:] for name in output_files if name.endswith('.png')] for name in input_files: self.assertTrue((name + '.png') in output_no_index) # Test JSON response file_path = os.path.join(self.inference_test_dir, self.output_dir, 'response.json') self.assertTrue(os.path.exists(file_path)) with open(file_path) as json_file: data = json.load(json_file) self.assertIn('protocol_version', data) self.assertIn('parts', data) # Test if the amount of binary buffers is equals to the elements in `parts` output_folder_path = os.path.join(self.inference_test_dir, self.output_dir) output_files = os.listdir(output_folder_path) count_masks = len([f for f in output_files if f.startswith("output_masks_")]) self.assertEqual(count_masks, len(data['parts'])) for index, part in enumerate(data['parts']): self.assertIsInstance(part['binary_type'], str) self.assertIn(part['binary_type'], ['heatmap', 'numeric_label_mask', 'dicom_secondary_capture', 'probability_mask', 'boolean_mask'], "'binary_type' is not among the supported mask types") if part['binary_type'] == 'dicom_secondary_capture' or part['binary_type'] == 'dicom': # The rest of the test does not apply continue self.assertIn('binary_data_shape', part) data_shape = part['binary_data_shape'] self.assertIsInstance(data_shape['timepoints'], int) self.assertIsInstance(data_shape['depth'], int) self.assertIsInstance(data_shape['width'], int) self.assertIsInstance(data_shape['height'], int) # test that the mask shape is as advertised mask = np.fromfile(os.path.join(self.inference_test_dir, self.output_dir, "output_masks_{}.npy".format(index + 1)), dtype=np.uint8) self.assertEqual(mask.shape[0], data_shape['timepoints'] * data_shape['depth'] * data_shape['width'] * data_shape['height']) if part['binary_type'] == 'heatmap': self.validate_heatmap_palettes(part, data) elif part['binary_type'] == 'numeric_label_mask': self.validate_numeric_label_mask(part, mask) print(term_colors.OKGREEN + "3D segmentation test succeeded!!", term_colors.ENDC) ```
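The binary mask the test loads with np.fromfile is a flat uint8 buffer; the `binary_data_shape` block in response.json carries the dimensions needed to view it as a volume again. A sketch of that post-processing step (paths are hypothetical and follow the test's output layout; the axis order is assumed to be timepoints, depth, height, width):

```python
import json
import numpy as np

out_dir = 'inference-test-tool/test_3d_out'  # hypothetical output location

with open(f'{out_dir}/response.json') as f:
    part = json.load(f)['parts'][0]

shape = part['binary_data_shape']
mask = np.fromfile(f'{out_dir}/output_masks_1.npy', dtype=np.uint8)

# Reinterpret the flat buffer using the advertised dimensions.
volume = mask.reshape(shape['timepoints'], shape['depth'],
                      shape['height'], shape['width'])
print(volume.shape, volume.dtype)
```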
{ "source": "jensenbox/python-jamf", "score": 2 }
#### File: jamf/api/advanced_mobile_device_searches_api.py ```python from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from jamf.api_client import ApiClient from jamf.exceptions import ( # noqa: F401 ApiTypeError, ApiValueError ) class AdvancedMobileDeviceSearchesApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def v1_advanced_mobile_device_searches_choices_get(self, criteria, **kwargs): # noqa: E501 """Get Mobile Device Advanced Search criteria choices # noqa: E501 Gets Mobile Device Advanced Search criteria choices. A list of potentially valid choices can be found by navigating to the Criteria page of the Advanced Mobile Device Search creation process. A few are \"App Name\", \"Building\", and \"Display Name\". # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_choices_get(criteria, async_req=True) >>> result = thread.get() :param criteria: (required) :type criteria: str :param site: :type site: str :param contains: :type contains: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: AdvancedSearchCriteriaChoices """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_choices_get_with_http_info(criteria, **kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_choices_get_with_http_info(self, criteria, **kwargs): # noqa: E501 """Get Mobile Device Advanced Search criteria choices # noqa: E501 Gets Mobile Device Advanced Search criteria choices. A list of potentially valid choices can be found by navigating to the Criteria page of the Advanced Mobile Device Search creation process. A few are \"App Name\", \"Building\", and \"Display Name\". # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_choices_get_with_http_info(criteria, async_req=True) >>> result = thread.get() :param criteria: (required) :type criteria: str :param site: :type site: str :param contains: :type contains: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(AdvancedSearchCriteriaChoices, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'criteria', 'site', 'contains' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_choices_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'criteria' is set if self.api_client.client_side_validation and ('criteria' not in local_var_params or # noqa: E501 local_var_params['criteria'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `criteria` when calling `v1_advanced_mobile_device_searches_choices_get`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'criteria' in local_var_params and local_var_params['criteria'] is not None: # noqa: E501 query_params.append(('criteria', local_var_params['criteria'])) # noqa: E501 if 'site' in local_var_params and local_var_params['site'] is not None: # noqa: E501 query_params.append(('site', local_var_params['site'])) # noqa: E501 if 'contains' in local_var_params and local_var_params['contains'] is not None: # noqa: E501 query_params.append(('contains', local_var_params['contains'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "AdvancedSearchCriteriaChoices", } return self.api_client.call_api( '/v1/advanced-mobile-device-searches/choices', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_advanced_mobile_device_searches_delete_multiple_post(self, ids, **kwargs): # noqa: E501 """Remove specified Advanced Search objects # noqa: E501 Removes specified Advanced Search Objects # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_delete_multiple_post(ids, async_req=True) >>> result = thread.get() :param ids: ids of the building to be deleted (required) :type ids: Ids :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. 
If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: None """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_delete_multiple_post_with_http_info(ids, **kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_delete_multiple_post_with_http_info(self, ids, **kwargs): # noqa: E501 """Remove specified Advanced Search objects # noqa: E501 Removes specified Advanced Search Objects # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_delete_multiple_post_with_http_info(ids, async_req=True) >>> result = thread.get() :param ids: ids of the building to be deleted (required) :type ids: Ids :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
:rtype: None """ local_var_params = locals() all_params = [ 'ids' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_delete_multiple_post" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'ids' is set if self.api_client.client_side_validation and ('ids' not in local_var_params or # noqa: E501 local_var_params['ids'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `ids` when calling `v1_advanced_mobile_device_searches_delete_multiple_post`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'ids' in local_var_params: body_params = local_var_params['ids'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = {} return self.api_client.call_api( '/v1/advanced-mobile-device-searches/delete-multiple', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_advanced_mobile_device_searches_get(self, **kwargs): # noqa: E501 """Get Advanced Search objects # noqa: E501 Gets Advanced Search Objects # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_get(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: AdvancedSearchSearchResults """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_get_with_http_info(**kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_get_with_http_info(self, **kwargs): # noqa: E501 """Get Advanced Search objects # noqa: E501 Gets Advanced Search Objects # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_get_with_http_info(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(AdvancedSearchSearchResults, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "AdvancedSearchSearchResults", } return self.api_client.call_api( '/v1/advanced-mobile-device-searches', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_advanced_mobile_device_searches_id_delete(self, id, **kwargs): # noqa: E501 """Remove specified Advanced Search object # noqa: E501 Removes specified Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_id_delete(id, async_req=True) >>> result = thread.get() :param id: instance id of advanced search record (required) :type id: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. 
If the method is called asynchronously, returns the request thread. :rtype: None """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_id_delete_with_http_info(id, **kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_id_delete_with_http_info(self, id, **kwargs): # noqa: E501 """Remove specified Advanced Search object # noqa: E501 Removes specified Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_id_delete_with_http_info(id, async_req=True) >>> result = thread.get() :param id: instance id of advanced search record (required) :type id: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: None """ local_var_params = locals() all_params = [ 'id' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_id_delete" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'id' is set if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501 local_var_params['id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `id` when calling `v1_advanced_mobile_device_searches_id_delete`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in local_var_params: path_params['id'] = local_var_params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = {} return self.api_client.call_api( '/v1/advanced-mobile-device-searches/{id}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_advanced_mobile_device_searches_id_get(self, id, **kwargs): # noqa: E501 """Get specified Advanced Search 
object # noqa: E501 Gets Specified Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_id_get(id, async_req=True) >>> result = thread.get() :param id: id of target Advanced Search (required) :type id: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: AdvancedSearch """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_id_get_with_http_info(id, **kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_id_get_with_http_info(self, id, **kwargs): # noqa: E501 """Get specified Advanced Search object # noqa: E501 Gets Specified Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_id_get_with_http_info(id, async_req=True) >>> result = thread.get() :param id: id of target Advanced Search (required) :type id: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
:rtype: tuple(AdvancedSearch, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'id' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_id_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'id' is set if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501 local_var_params['id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `id` when calling `v1_advanced_mobile_device_searches_id_get`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in local_var_params: path_params['id'] = local_var_params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "AdvancedSearch", 404: "ApiError", } return self.api_client.call_api( '/v1/advanced-mobile-device-searches/{id}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_advanced_mobile_device_searches_id_put(self, id, advanced_search, **kwargs): # noqa: E501 """Get specified Advanced Search object # noqa: E501 Gets Specified Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_id_put(id, advanced_search, async_req=True) >>> result = thread.get() :param id: id of target Advanced Search (required) :type id: str :param advanced_search: (required) :type advanced_search: AdvancedSearch :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: AdvancedSearch """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_id_put_with_http_info(id, advanced_search, **kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_id_put_with_http_info(self, id, advanced_search, **kwargs): # noqa: E501 """Get specified Advanced Search object # noqa: E501 Gets Specified Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_id_put_with_http_info(id, advanced_search, async_req=True) >>> result = thread.get() :param id: id of target Advanced Search (required) :type id: str :param advanced_search: (required) :type advanced_search: AdvancedSearch :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(AdvancedSearch, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'id', 'advanced_search' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_id_put" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'id' is set if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501 local_var_params['id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `id` when calling `v1_advanced_mobile_device_searches_id_put`") # noqa: E501 # verify the required parameter 'advanced_search' is set if self.api_client.client_side_validation and ('advanced_search' not in local_var_params or # noqa: E501 local_var_params['advanced_search'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `advanced_search` when calling `v1_advanced_mobile_device_searches_id_put`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in local_var_params: path_params['id'] = local_var_params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'advanced_search' in local_var_params: body_params = local_var_params['advanced_search'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "AdvancedSearch", 404: "ApiError", } return self.api_client.call_api( '/v1/advanced-mobile-device-searches/{id}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 
_preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_advanced_mobile_device_searches_post(self, advanced_search, **kwargs): # noqa: E501 """Create Advanced Search object # noqa: E501 Creates Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_post(advanced_search, async_req=True) >>> result = thread.get() :param advanced_search: (required) :type advanced_search: AdvancedSearch :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: HrefResponse """ kwargs['_return_http_data_only'] = True return self.v1_advanced_mobile_device_searches_post_with_http_info(advanced_search, **kwargs) # noqa: E501 def v1_advanced_mobile_device_searches_post_with_http_info(self, advanced_search, **kwargs): # noqa: E501 """Create Advanced Search object # noqa: E501 Creates Advanced Search Object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_advanced_mobile_device_searches_post_with_http_info(advanced_search, async_req=True) >>> result = thread.get() :param advanced_search: (required) :type advanced_search: AdvancedSearch :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
:rtype: tuple(HrefResponse, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'advanced_search' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_advanced_mobile_device_searches_post" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'advanced_search' is set if self.api_client.client_side_validation and ('advanced_search' not in local_var_params or # noqa: E501 local_var_params['advanced_search'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `advanced_search` when calling `v1_advanced_mobile_device_searches_post`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'advanced_search' in local_var_params: body_params = local_var_params['advanced_search'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 201: "HrefResponse", } return self.api_client.call_api( '/v1/advanced-mobile-device-searches', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) ``` #### File: jamf/models/active_patch_history.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ActivePatchHistory(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'patch_id': 'int', 'patch_history_id': 'int', 'device_id': 'int', 'device_name': 'str', 'building_id': 'int', 'building_name': 'str', 'department_id': 'int', 'department_name': 'str', 'site_id': 'int', 'site_name': 'str', 'username': 'str', 'os_version': 'str', 'last_check_in': 'datetime', 'installed_version': 'str' } attribute_map = { 'patch_id': 'patchId', 'patch_history_id': 'patchHistoryId', 'device_id': 'deviceId', 'device_name': 'deviceName', 'building_id': 'buildingId', 'building_name': 'buildingName', 'department_id': 'departmentId', 'department_name': 'departmentName', 'site_id': 'siteId', 'site_name': 'siteName', 'username': 'username', 'os_version': 'osVersion', 'last_check_in': 'lastCheckIn', 'installed_version': 'installedVersion' } def __init__(self, patch_id=None, patch_history_id=None, device_id=None, device_name=None, building_id=None, building_name=None, department_id=None, department_name=None, site_id=None, site_name=None, username=None, os_version=None, last_check_in=None, installed_version=None, local_vars_configuration=None): # noqa: E501 """ActivePatchHistory - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._patch_id = None self._patch_history_id = None self._device_id = None self._device_name = None self._building_id = None self._building_name = None self._department_id = None self._department_name = None self._site_id = None self._site_name = None self._username = None self._os_version = None self._last_check_in = None self._installed_version = None self.discriminator = None if patch_id is not None: self.patch_id = patch_id if patch_history_id is not None: self.patch_history_id = patch_history_id if device_id is not None: self.device_id = device_id if device_name is not None: self.device_name = device_name if building_id is not None: self.building_id = building_id if building_name is not None: self.building_name = building_name if department_id is not None: self.department_id = department_id if department_name is not None: self.department_name = department_name if site_id is not None: self.site_id = site_id if site_name is not None: self.site_name = site_name if username is not None: self.username = username if os_version is not None: self.os_version = os_version if last_check_in is not None: self.last_check_in = last_check_in if installed_version is not None: self.installed_version = installed_version @property def patch_id(self): """Gets the patch_id of this ActivePatchHistory. # noqa: E501 :return: The patch_id of this ActivePatchHistory. # noqa: E501 :rtype: int """ return self._patch_id @patch_id.setter def patch_id(self, patch_id): """Sets the patch_id of this ActivePatchHistory. :param patch_id: The patch_id of this ActivePatchHistory. # noqa: E501 :type patch_id: int """ self._patch_id = patch_id @property def patch_history_id(self): """Gets the patch_history_id of this ActivePatchHistory. # noqa: E501 :return: The patch_history_id of this ActivePatchHistory. # noqa: E501 :rtype: int """ return self._patch_history_id @patch_history_id.setter def patch_history_id(self, patch_history_id): """Sets the patch_history_id of this ActivePatchHistory. :param patch_history_id: The patch_history_id of this ActivePatchHistory. # noqa: E501 :type patch_history_id: int """ self._patch_history_id = patch_history_id @property def device_id(self): """Gets the device_id of this ActivePatchHistory. 
# noqa: E501 :return: The device_id of this ActivePatchHistory. # noqa: E501 :rtype: int """ return self._device_id @device_id.setter def device_id(self, device_id): """Sets the device_id of this ActivePatchHistory. :param device_id: The device_id of this ActivePatchHistory. # noqa: E501 :type device_id: int """ self._device_id = device_id @property def device_name(self): """Gets the device_name of this ActivePatchHistory. # noqa: E501 :return: The device_name of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._device_name @device_name.setter def device_name(self, device_name): """Sets the device_name of this ActivePatchHistory. :param device_name: The device_name of this ActivePatchHistory. # noqa: E501 :type device_name: str """ self._device_name = device_name @property def building_id(self): """Gets the building_id of this ActivePatchHistory. # noqa: E501 :return: The building_id of this ActivePatchHistory. # noqa: E501 :rtype: int """ return self._building_id @building_id.setter def building_id(self, building_id): """Sets the building_id of this ActivePatchHistory. :param building_id: The building_id of this ActivePatchHistory. # noqa: E501 :type building_id: int """ self._building_id = building_id @property def building_name(self): """Gets the building_name of this ActivePatchHistory. # noqa: E501 :return: The building_name of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._building_name @building_name.setter def building_name(self, building_name): """Sets the building_name of this ActivePatchHistory. :param building_name: The building_name of this ActivePatchHistory. # noqa: E501 :type building_name: str """ self._building_name = building_name @property def department_id(self): """Gets the department_id of this ActivePatchHistory. # noqa: E501 :return: The department_id of this ActivePatchHistory. # noqa: E501 :rtype: int """ return self._department_id @department_id.setter def department_id(self, department_id): """Sets the department_id of this ActivePatchHistory. :param department_id: The department_id of this ActivePatchHistory. # noqa: E501 :type department_id: int """ self._department_id = department_id @property def department_name(self): """Gets the department_name of this ActivePatchHistory. # noqa: E501 :return: The department_name of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._department_name @department_name.setter def department_name(self, department_name): """Sets the department_name of this ActivePatchHistory. :param department_name: The department_name of this ActivePatchHistory. # noqa: E501 :type department_name: str """ self._department_name = department_name @property def site_id(self): """Gets the site_id of this ActivePatchHistory. # noqa: E501 :return: The site_id of this ActivePatchHistory. # noqa: E501 :rtype: int """ return self._site_id @site_id.setter def site_id(self, site_id): """Sets the site_id of this ActivePatchHistory. :param site_id: The site_id of this ActivePatchHistory. # noqa: E501 :type site_id: int """ self._site_id = site_id @property def site_name(self): """Gets the site_name of this ActivePatchHistory. # noqa: E501 :return: The site_name of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._site_name @site_name.setter def site_name(self, site_name): """Sets the site_name of this ActivePatchHistory. :param site_name: The site_name of this ActivePatchHistory. 
# noqa: E501 :type site_name: str """ self._site_name = site_name @property def username(self): """Gets the username of this ActivePatchHistory. # noqa: E501 :return: The username of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._username @username.setter def username(self, username): """Sets the username of this ActivePatchHistory. :param username: The username of this ActivePatchHistory. # noqa: E501 :type username: str """ self._username = username @property def os_version(self): """Gets the os_version of this ActivePatchHistory. # noqa: E501 :return: The os_version of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._os_version @os_version.setter def os_version(self, os_version): """Sets the os_version of this ActivePatchHistory. :param os_version: The os_version of this ActivePatchHistory. # noqa: E501 :type os_version: str """ self._os_version = os_version @property def last_check_in(self): """Gets the last_check_in of this ActivePatchHistory. # noqa: E501 :return: The last_check_in of this ActivePatchHistory. # noqa: E501 :rtype: datetime """ return self._last_check_in @last_check_in.setter def last_check_in(self, last_check_in): """Sets the last_check_in of this ActivePatchHistory. :param last_check_in: The last_check_in of this ActivePatchHistory. # noqa: E501 :type last_check_in: datetime """ self._last_check_in = last_check_in @property def installed_version(self): """Gets the installed_version of this ActivePatchHistory. # noqa: E501 :return: The installed_version of this ActivePatchHistory. # noqa: E501 :rtype: str """ return self._installed_version @installed_version.setter def installed_version(self, installed_version): """Sets the installed_version of this ActivePatchHistory. :param installed_version: The installed_version of this ActivePatchHistory. # noqa: E501 :type installed_version: str """ self._installed_version = installed_version def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ActivePatchHistory): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ActivePatchHistory): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/app_dynamics_config.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class AppDynamicsConfig(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'enable_eum': 'bool', 'app_key': 'str', 'adrum_ext_url_http': 'str', 'adrum_ext_url_https': 'str', 'adrum_script_http': 'str', 'adrum_script_https': 'str', 'beacon_url_http': 'str', 'beacon_url_https': 'str' } attribute_map = { 'enable_eum': 'enableEum', 'app_key': 'appKey', 'adrum_ext_url_http': 'adrumExtUrlHttp', 'adrum_ext_url_https': 'adrumExtUrlHttps', 'adrum_script_http': 'adrumScriptHttp', 'adrum_script_https': 'adrumScriptHttps', 'beacon_url_http': 'beaconUrlHttp', 'beacon_url_https': 'beaconUrlHttps' } def __init__(self, enable_eum=False, app_key=None, adrum_ext_url_http=None, adrum_ext_url_https=None, adrum_script_http=None, adrum_script_https=None, beacon_url_http=None, beacon_url_https=None, local_vars_configuration=None): # noqa: E501 """AppDynamicsConfig - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._enable_eum = None self._app_key = None self._adrum_ext_url_http = None self._adrum_ext_url_https = None self._adrum_script_http = None self._adrum_script_https = None self._beacon_url_http = None self._beacon_url_https = None self.discriminator = None self.enable_eum = enable_eum self.app_key = app_key self.adrum_ext_url_http = adrum_ext_url_http self.adrum_ext_url_https = adrum_ext_url_https self.adrum_script_http = adrum_script_http self.adrum_script_https = adrum_script_https self.beacon_url_http = beacon_url_http self.beacon_url_https = beacon_url_https @property def enable_eum(self): """Gets the enable_eum of this AppDynamicsConfig. # noqa: E501 :return: The enable_eum of this AppDynamicsConfig. # noqa: E501 :rtype: bool """ return self._enable_eum @enable_eum.setter def enable_eum(self, enable_eum): """Sets the enable_eum of this AppDynamicsConfig. :param enable_eum: The enable_eum of this AppDynamicsConfig. # noqa: E501 :type enable_eum: bool """ if self.local_vars_configuration.client_side_validation and enable_eum is None: # noqa: E501 raise ValueError("Invalid value for `enable_eum`, must not be `None`") # noqa: E501 self._enable_eum = enable_eum @property def app_key(self): """Gets the app_key of this AppDynamicsConfig. # noqa: E501 :return: The app_key of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._app_key @app_key.setter def app_key(self, app_key): """Sets the app_key of this AppDynamicsConfig. :param app_key: The app_key of this AppDynamicsConfig. # noqa: E501 :type app_key: str """ if self.local_vars_configuration.client_side_validation and app_key is None: # noqa: E501 raise ValueError("Invalid value for `app_key`, must not be `None`") # noqa: E501 self._app_key = app_key @property def adrum_ext_url_http(self): """Gets the adrum_ext_url_http of this AppDynamicsConfig. # noqa: E501 :return: The adrum_ext_url_http of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._adrum_ext_url_http @adrum_ext_url_http.setter def adrum_ext_url_http(self, adrum_ext_url_http): """Sets the adrum_ext_url_http of this AppDynamicsConfig. :param adrum_ext_url_http: The adrum_ext_url_http of this AppDynamicsConfig. 
# noqa: E501 :type adrum_ext_url_http: str """ if self.local_vars_configuration.client_side_validation and adrum_ext_url_http is None: # noqa: E501 raise ValueError("Invalid value for `adrum_ext_url_http`, must not be `None`") # noqa: E501 self._adrum_ext_url_http = adrum_ext_url_http @property def adrum_ext_url_https(self): """Gets the adrum_ext_url_https of this AppDynamicsConfig. # noqa: E501 :return: The adrum_ext_url_https of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._adrum_ext_url_https @adrum_ext_url_https.setter def adrum_ext_url_https(self, adrum_ext_url_https): """Sets the adrum_ext_url_https of this AppDynamicsConfig. :param adrum_ext_url_https: The adrum_ext_url_https of this AppDynamicsConfig. # noqa: E501 :type adrum_ext_url_https: str """ if self.local_vars_configuration.client_side_validation and adrum_ext_url_https is None: # noqa: E501 raise ValueError("Invalid value for `adrum_ext_url_https`, must not be `None`") # noqa: E501 self._adrum_ext_url_https = adrum_ext_url_https @property def adrum_script_http(self): """Gets the adrum_script_http of this AppDynamicsConfig. # noqa: E501 :return: The adrum_script_http of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._adrum_script_http @adrum_script_http.setter def adrum_script_http(self, adrum_script_http): """Sets the adrum_script_http of this AppDynamicsConfig. :param adrum_script_http: The adrum_script_http of this AppDynamicsConfig. # noqa: E501 :type adrum_script_http: str """ if self.local_vars_configuration.client_side_validation and adrum_script_http is None: # noqa: E501 raise ValueError("Invalid value for `adrum_script_http`, must not be `None`") # noqa: E501 self._adrum_script_http = adrum_script_http @property def adrum_script_https(self): """Gets the adrum_script_https of this AppDynamicsConfig. # noqa: E501 :return: The adrum_script_https of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._adrum_script_https @adrum_script_https.setter def adrum_script_https(self, adrum_script_https): """Sets the adrum_script_https of this AppDynamicsConfig. :param adrum_script_https: The adrum_script_https of this AppDynamicsConfig. # noqa: E501 :type adrum_script_https: str """ if self.local_vars_configuration.client_side_validation and adrum_script_https is None: # noqa: E501 raise ValueError("Invalid value for `adrum_script_https`, must not be `None`") # noqa: E501 self._adrum_script_https = adrum_script_https @property def beacon_url_http(self): """Gets the beacon_url_http of this AppDynamicsConfig. # noqa: E501 :return: The beacon_url_http of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._beacon_url_http @beacon_url_http.setter def beacon_url_http(self, beacon_url_http): """Sets the beacon_url_http of this AppDynamicsConfig. :param beacon_url_http: The beacon_url_http of this AppDynamicsConfig. # noqa: E501 :type beacon_url_http: str """ if self.local_vars_configuration.client_side_validation and beacon_url_http is None: # noqa: E501 raise ValueError("Invalid value for `beacon_url_http`, must not be `None`") # noqa: E501 self._beacon_url_http = beacon_url_http @property def beacon_url_https(self): """Gets the beacon_url_https of this AppDynamicsConfig. # noqa: E501 :return: The beacon_url_https of this AppDynamicsConfig. # noqa: E501 :rtype: str """ return self._beacon_url_https @beacon_url_https.setter def beacon_url_https(self, beacon_url_https): """Sets the beacon_url_https of this AppDynamicsConfig. 
:param beacon_url_https: The beacon_url_https of this AppDynamicsConfig. # noqa: E501 :type beacon_url_https: str """ if self.local_vars_configuration.client_side_validation and beacon_url_https is None: # noqa: E501 raise ValueError("Invalid value for `beacon_url_https`, must not be `None`") # noqa: E501 self._beacon_url_https = beacon_url_https def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, AppDynamicsConfig): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, AppDynamicsConfig): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/apple_tv_details.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class AppleTvDetails(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'model': 'str', 'model_identifier': 'str', 'model_number': 'str', 'is_supervised': 'bool', 'airplay_password': '<PASSWORD>', 'device_id': 'str', 'locales': 'str', 'purchasing': 'Purchasing', 'configuration_profiles': 'list[ConfigurationProfile]' } attribute_map = { 'model': 'model', 'model_identifier': 'modelIdentifier', 'model_number': 'modelNumber', 'is_supervised': 'isSupervised', 'airplay_password': '<PASSWORD>', 'device_id': 'deviceId', 'locales': 'locales', 'purchasing': 'purchasing', 'configuration_profiles': 'configurationProfiles' } def __init__(self, model=None, model_identifier=None, model_number=None, is_supervised=None, airplay_password=<PASSWORD>, device_id=None, locales=None, purchasing=None, configuration_profiles=None, local_vars_configuration=None): # noqa: E501 """AppleTvDetails - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._model = None self._model_identifier = None self._model_number = None self._is_supervised = None self._airplay_password = None self._device_id = None self._locales = None self._purchasing = None self._configuration_profiles = None self.discriminator = None if model is not None: self.model = model if model_identifier is not None: self.model_identifier = model_identifier if model_number is not None: self.model_number = model_number if is_supervised is not None: self.is_supervised = is_supervised if airplay_password is not None: self.airplay_password = <PASSWORD> if device_id is not None: self.device_id = device_id if locales is not None: self.locales = locales if purchasing is not None: self.purchasing = purchasing if configuration_profiles is not None: self.configuration_profiles = configuration_profiles @property def model(self): """Gets the model of this AppleTvDetails. # noqa: E501 :return: The model of this AppleTvDetails. # noqa: E501 :rtype: str """ return self._model @model.setter def model(self, model): """Sets the model of this AppleTvDetails. :param model: The model of this AppleTvDetails. # noqa: E501 :type model: str """ self._model = model @property def model_identifier(self): """Gets the model_identifier of this AppleTvDetails. # noqa: E501 :return: The model_identifier of this AppleTvDetails. # noqa: E501 :rtype: str """ return self._model_identifier @model_identifier.setter def model_identifier(self, model_identifier): """Sets the model_identifier of this AppleTvDetails. :param model_identifier: The model_identifier of this AppleTvDetails. # noqa: E501 :type model_identifier: str """ self._model_identifier = model_identifier @property def model_number(self): """Gets the model_number of this AppleTvDetails. # noqa: E501 :return: The model_number of this AppleTvDetails. # noqa: E501 :rtype: str """ return self._model_number @model_number.setter def model_number(self, model_number): """Sets the model_number of this AppleTvDetails. :param model_number: The model_number of this AppleTvDetails. # noqa: E501 :type model_number: str """ self._model_number = model_number @property def is_supervised(self): """Gets the is_supervised of this AppleTvDetails. # noqa: E501 :return: The is_supervised of this AppleTvDetails. # noqa: E501 :rtype: bool """ return self._is_supervised @is_supervised.setter def is_supervised(self, is_supervised): """Sets the is_supervised of this AppleTvDetails. :param is_supervised: The is_supervised of this AppleTvDetails. 
# noqa: E501 :type is_supervised: bool """ self._is_supervised = is_supervised @property def airplay_password(self): """Gets the airplay_password of this AppleTvDetails. # noqa: E501 :return: The airplay_password of this AppleTvDetails. # noqa: E501 :rtype: str """ return self._airplay_password @airplay_password.setter def airplay_password(self, airplay_password): """Sets the airplay_password of this AppleTvDetails. :param airplay_password: The airplay_password of this AppleTvDetails. # noqa: E501 :type airplay_password: str """ self._airplay_password = airplay_password @property def device_id(self): """Gets the device_id of this AppleTvDetails. # noqa: E501 :return: The device_id of this AppleTvDetails. # noqa: E501 :rtype: str """ return self._device_id @device_id.setter def device_id(self, device_id): """Sets the device_id of this AppleTvDetails. :param device_id: The device_id of this AppleTvDetails. # noqa: E501 :type device_id: str """ self._device_id = device_id @property def locales(self): """Gets the locales of this AppleTvDetails. # noqa: E501 :return: The locales of this AppleTvDetails. # noqa: E501 :rtype: str """ return self._locales @locales.setter def locales(self, locales): """Sets the locales of this AppleTvDetails. :param locales: The locales of this AppleTvDetails. # noqa: E501 :type locales: str """ self._locales = locales @property def purchasing(self): """Gets the purchasing of this AppleTvDetails. # noqa: E501 :return: The purchasing of this AppleTvDetails. # noqa: E501 :rtype: Purchasing """ return self._purchasing @purchasing.setter def purchasing(self, purchasing): """Sets the purchasing of this AppleTvDetails. :param purchasing: The purchasing of this AppleTvDetails. # noqa: E501 :type purchasing: Purchasing """ self._purchasing = purchasing @property def configuration_profiles(self): """Gets the configuration_profiles of this AppleTvDetails. # noqa: E501 :return: The configuration_profiles of this AppleTvDetails. # noqa: E501 :rtype: list[ConfigurationProfile] """ return self._configuration_profiles @configuration_profiles.setter def configuration_profiles(self, configuration_profiles): """Sets the configuration_profiles of this AppleTvDetails. :param configuration_profiles: The configuration_profiles of this AppleTvDetails. 
# noqa: E501 :type configuration_profiles: list[ConfigurationProfile] """ self._configuration_profiles = configuration_profiles def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, AppleTvDetails): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, AppleTvDetails): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/cache_settings.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class CacheSettings(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'id': 'str', 'name': 'str', 'cache_type': 'str', 'time_to_live_seconds': 'int', 'time_to_idle_seconds': 'int', 'ehcache_max_bytes_local_heap': 'str', 'cache_unique_id': 'str', 'elasticache': 'bool', 'memcached_endpoints': 'list[MemcachedEndpoints]' } attribute_map = { 'id': 'id', 'name': 'name', 'cache_type': 'cacheType', 'time_to_live_seconds': 'timeToLiveSeconds', 'time_to_idle_seconds': 'timeToIdleSeconds', 'ehcache_max_bytes_local_heap': 'ehcacheMaxBytesLocalHeap', 'cache_unique_id': 'cacheUniqueId', 'elasticache': 'elasticache', 'memcached_endpoints': 'memcachedEndpoints' } def __init__(self, id='0', name='cache configuration', cache_type=None, time_to_live_seconds=None, time_to_idle_seconds=None, ehcache_max_bytes_local_heap='null', cache_unique_id=None, elasticache=False, memcached_endpoints=None, local_vars_configuration=None): # noqa: E501 """CacheSettings - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._id = None self._name = None self._cache_type = None self._time_to_live_seconds = None self._time_to_idle_seconds = None self._ehcache_max_bytes_local_heap = None self._cache_unique_id = None self._elasticache = None self._memcached_endpoints = None self.discriminator = None if id is not None: self.id = id if name is not None: self.name = name self.cache_type = cache_type self.time_to_live_seconds = time_to_live_seconds if time_to_idle_seconds is not None: self.time_to_idle_seconds = time_to_idle_seconds if ehcache_max_bytes_local_heap is not None: self.ehcache_max_bytes_local_heap = ehcache_max_bytes_local_heap self.cache_unique_id = cache_unique_id if elasticache is not None: self.elasticache = elasticache self.memcached_endpoints = memcached_endpoints @property def id(self): """Gets 
the id of this CacheSettings. # noqa: E501 :return: The id of this CacheSettings. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this CacheSettings. :param id: The id of this CacheSettings. # noqa: E501 :type id: str """ self._id = id @property def name(self): """Gets the name of this CacheSettings. # noqa: E501 :return: The name of this CacheSettings. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this CacheSettings. :param name: The name of this CacheSettings. # noqa: E501 :type name: str """ self._name = name @property def cache_type(self): """Gets the cache_type of this CacheSettings. # noqa: E501 :return: The cache_type of this CacheSettings. # noqa: E501 :rtype: str """ return self._cache_type @cache_type.setter def cache_type(self, cache_type): """Sets the cache_type of this CacheSettings. :param cache_type: The cache_type of this CacheSettings. # noqa: E501 :type cache_type: str """ if self.local_vars_configuration.client_side_validation and cache_type is None: # noqa: E501 raise ValueError("Invalid value for `cache_type`, must not be `None`") # noqa: E501 self._cache_type = cache_type @property def time_to_live_seconds(self): """Gets the time_to_live_seconds of this CacheSettings. # noqa: E501 :return: The time_to_live_seconds of this CacheSettings. # noqa: E501 :rtype: int """ return self._time_to_live_seconds @time_to_live_seconds.setter def time_to_live_seconds(self, time_to_live_seconds): """Sets the time_to_live_seconds of this CacheSettings. :param time_to_live_seconds: The time_to_live_seconds of this CacheSettings. # noqa: E501 :type time_to_live_seconds: int """ if self.local_vars_configuration.client_side_validation and time_to_live_seconds is None: # noqa: E501 raise ValueError("Invalid value for `time_to_live_seconds`, must not be `None`") # noqa: E501 self._time_to_live_seconds = time_to_live_seconds @property def time_to_idle_seconds(self): """Gets the time_to_idle_seconds of this CacheSettings. # noqa: E501 :return: The time_to_idle_seconds of this CacheSettings. # noqa: E501 :rtype: int """ return self._time_to_idle_seconds @time_to_idle_seconds.setter def time_to_idle_seconds(self, time_to_idle_seconds): """Sets the time_to_idle_seconds of this CacheSettings. :param time_to_idle_seconds: The time_to_idle_seconds of this CacheSettings. # noqa: E501 :type time_to_idle_seconds: int """ self._time_to_idle_seconds = time_to_idle_seconds @property def ehcache_max_bytes_local_heap(self): """Gets the ehcache_max_bytes_local_heap of this CacheSettings. # noqa: E501 :return: The ehcache_max_bytes_local_heap of this CacheSettings. # noqa: E501 :rtype: str """ return self._ehcache_max_bytes_local_heap @ehcache_max_bytes_local_heap.setter def ehcache_max_bytes_local_heap(self, ehcache_max_bytes_local_heap): """Sets the ehcache_max_bytes_local_heap of this CacheSettings. :param ehcache_max_bytes_local_heap: The ehcache_max_bytes_local_heap of this CacheSettings. # noqa: E501 :type ehcache_max_bytes_local_heap: str """ self._ehcache_max_bytes_local_heap = ehcache_max_bytes_local_heap @property def cache_unique_id(self): """Gets the cache_unique_id of this CacheSettings. # noqa: E501 The default is for Jamf Pro to generate a UUID, so we can only give an example instead. # noqa: E501 :return: The cache_unique_id of this CacheSettings. 
# noqa: E501 :rtype: str """ return self._cache_unique_id @cache_unique_id.setter def cache_unique_id(self, cache_unique_id): """Sets the cache_unique_id of this CacheSettings. The default is for Jamf Pro to generate a UUID, so we can only give an example instead. # noqa: E501 :param cache_unique_id: The cache_unique_id of this CacheSettings. # noqa: E501 :type cache_unique_id: str """ if self.local_vars_configuration.client_side_validation and cache_unique_id is None: # noqa: E501 raise ValueError("Invalid value for `cache_unique_id`, must not be `None`") # noqa: E501 self._cache_unique_id = cache_unique_id @property def elasticache(self): """Gets the elasticache of this CacheSettings. # noqa: E501 :return: The elasticache of this CacheSettings. # noqa: E501 :rtype: bool """ return self._elasticache @elasticache.setter def elasticache(self, elasticache): """Sets the elasticache of this CacheSettings. :param elasticache: The elasticache of this CacheSettings. # noqa: E501 :type elasticache: bool """ self._elasticache = elasticache @property def memcached_endpoints(self): """Gets the memcached_endpoints of this CacheSettings. # noqa: E501 :return: The memcached_endpoints of this CacheSettings. # noqa: E501 :rtype: list[MemcachedEndpoints] """ return self._memcached_endpoints @memcached_endpoints.setter def memcached_endpoints(self, memcached_endpoints): """Sets the memcached_endpoints of this CacheSettings. :param memcached_endpoints: The memcached_endpoints of this CacheSettings. # noqa: E501 :type memcached_endpoints: list[MemcachedEndpoints] """ if self.local_vars_configuration.client_side_validation and memcached_endpoints is None: # noqa: E501 raise ValueError("Invalid value for `memcached_endpoints`, must not be `None`") # noqa: E501 self._memcached_endpoints = memcached_endpoints def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CacheSettings): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, CacheSettings): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_application.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerApplication(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'name': 'str', 'path': 'str', 'version': 'str', 'mac_app_store': 'bool', 'size_megabytes': 'int', 'bundle_id': 'str', 'update_available': 'bool', 'external_version_id': 'str' } attribute_map = { 'name': 'name', 'path': 'path', 'version': 'version', 'mac_app_store': 'macAppStore', 'size_megabytes': 'sizeMegabytes', 'bundle_id': 'bundleId', 'update_available': 'updateAvailable', 'external_version_id': 'externalVersionId' } def __init__(self, name=None, path=None, version=None, mac_app_store=None, size_megabytes=None, bundle_id=None, update_available=None, external_version_id=None, local_vars_configuration=None): # noqa: E501 """ComputerApplication - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._path = None self._version = None self._mac_app_store = None self._size_megabytes = None self._bundle_id = None self._update_available = None self._external_version_id = None self.discriminator = None if name is not None: self.name = name if path is not None: self.path = path if version is not None: self.version = version if mac_app_store is not None: self.mac_app_store = mac_app_store if size_megabytes is not None: self.size_megabytes = size_megabytes if bundle_id is not None: self.bundle_id = bundle_id if update_available is not None: self.update_available = update_available if external_version_id is not None: self.external_version_id = external_version_id @property def name(self): """Gets the name of this ComputerApplication. # noqa: E501 :return: The name of this ComputerApplication. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this ComputerApplication. :param name: The name of this ComputerApplication. # noqa: E501 :type name: str """ self._name = name @property def path(self): """Gets the path of this ComputerApplication. # noqa: E501 :return: The path of this ComputerApplication. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this ComputerApplication. :param path: The path of this ComputerApplication. # noqa: E501 :type path: str """ self._path = path @property def version(self): """Gets the version of this ComputerApplication. # noqa: E501 :return: The version of this ComputerApplication. # noqa: E501 :rtype: str """ return self._version @version.setter def version(self, version): """Sets the version of this ComputerApplication. :param version: The version of this ComputerApplication. # noqa: E501 :type version: str """ self._version = version @property def mac_app_store(self): """Gets the mac_app_store of this ComputerApplication. # noqa: E501 :return: The mac_app_store of this ComputerApplication. # noqa: E501 :rtype: bool """ return self._mac_app_store @mac_app_store.setter def mac_app_store(self, mac_app_store): """Sets the mac_app_store of this ComputerApplication. :param mac_app_store: The mac_app_store of this ComputerApplication. # noqa: E501 :type mac_app_store: bool """ self._mac_app_store = mac_app_store @property def size_megabytes(self): """Gets the size_megabytes of this ComputerApplication. # noqa: E501 :return: The size_megabytes of this ComputerApplication. # noqa: E501 :rtype: int """ return self._size_megabytes @size_megabytes.setter def size_megabytes(self, size_megabytes): """Sets the size_megabytes of this ComputerApplication. 
:param size_megabytes: The size_megabytes of this ComputerApplication. # noqa: E501 :type size_megabytes: int """ self._size_megabytes = size_megabytes @property def bundle_id(self): """Gets the bundle_id of this ComputerApplication. # noqa: E501 :return: The bundle_id of this ComputerApplication. # noqa: E501 :rtype: str """ return self._bundle_id @bundle_id.setter def bundle_id(self, bundle_id): """Sets the bundle_id of this ComputerApplication. :param bundle_id: The bundle_id of this ComputerApplication. # noqa: E501 :type bundle_id: str """ self._bundle_id = bundle_id @property def update_available(self): """Gets the update_available of this ComputerApplication. # noqa: E501 :return: The update_available of this ComputerApplication. # noqa: E501 :rtype: bool """ return self._update_available @update_available.setter def update_available(self, update_available): """Sets the update_available of this ComputerApplication. :param update_available: The update_available of this ComputerApplication. # noqa: E501 :type update_available: bool """ self._update_available = update_available @property def external_version_id(self): """Gets the external_version_id of this ComputerApplication. # noqa: E501 The app's external version ID. It can be used in the iTunes Search API to decide if the app needs to be updated # noqa: E501 :return: The external_version_id of this ComputerApplication. # noqa: E501 :rtype: str """ return self._external_version_id @external_version_id.setter def external_version_id(self, external_version_id): """Sets the external_version_id of this ComputerApplication. The app's external version ID. It can be used in the iTunes Search API to decide if the app needs to be updated # noqa: E501 :param external_version_id: The external_version_id of this ComputerApplication. # noqa: E501 :type external_version_id: str """ self._external_version_id = external_version_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerApplication): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerApplication): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_certificate.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerCertificate(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'common_name': 'str', 'identity': 'bool', 'expiration_date': 'datetime', 'username': 'str', 'lifecycle_status': 'str', 'certificate_status': 'str' } attribute_map = { 'common_name': 'commonName', 'identity': 'identity', 'expiration_date': 'expirationDate', 'username': 'username', 'lifecycle_status': 'lifecycleStatus', 'certificate_status': 'certificateStatus' } def __init__(self, common_name=None, identity=None, expiration_date=None, username=None, lifecycle_status=None, certificate_status=None, local_vars_configuration=None): # noqa: E501 """ComputerCertificate - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._common_name = None self._identity = None self._expiration_date = None self._username = None self._lifecycle_status = None self._certificate_status = None self.discriminator = None if common_name is not None: self.common_name = common_name if identity is not None: self.identity = identity if expiration_date is not None: self.expiration_date = expiration_date if username is not None: self.username = username if lifecycle_status is not None: self.lifecycle_status = lifecycle_status if certificate_status is not None: self.certificate_status = certificate_status @property def common_name(self): """Gets the common_name of this ComputerCertificate. # noqa: E501 :return: The common_name of this ComputerCertificate. # noqa: E501 :rtype: str """ return self._common_name @common_name.setter def common_name(self, common_name): """Sets the common_name of this ComputerCertificate. :param common_name: The common_name of this ComputerCertificate. # noqa: E501 :type common_name: str """ self._common_name = common_name @property def identity(self): """Gets the identity of this ComputerCertificate. # noqa: E501 :return: The identity of this ComputerCertificate. # noqa: E501 :rtype: bool """ return self._identity @identity.setter def identity(self, identity): """Sets the identity of this ComputerCertificate. :param identity: The identity of this ComputerCertificate. # noqa: E501 :type identity: bool """ self._identity = identity @property def expiration_date(self): """Gets the expiration_date of this ComputerCertificate. # noqa: E501 :return: The expiration_date of this ComputerCertificate. # noqa: E501 :rtype: datetime """ return self._expiration_date @expiration_date.setter def expiration_date(self, expiration_date): """Sets the expiration_date of this ComputerCertificate. :param expiration_date: The expiration_date of this ComputerCertificate. # noqa: E501 :type expiration_date: datetime """ self._expiration_date = expiration_date @property def username(self): """Gets the username of this ComputerCertificate. # noqa: E501 :return: The username of this ComputerCertificate. # noqa: E501 :rtype: str """ return self._username @username.setter def username(self, username): """Sets the username of this ComputerCertificate. :param username: The username of this ComputerCertificate. # noqa: E501 :type username: str """ self._username = username @property def lifecycle_status(self): """Gets the lifecycle_status of this ComputerCertificate. # noqa: E501 :return: The lifecycle_status of this ComputerCertificate. # noqa: E501 :rtype: str """ return self._lifecycle_status @lifecycle_status.setter def lifecycle_status(self, lifecycle_status): """Sets the lifecycle_status of this ComputerCertificate. 
:param lifecycle_status: The lifecycle_status of this ComputerCertificate. # noqa: E501 :type lifecycle_status: str """ allowed_values = ["ACTIVE", "INACTIVE"] # noqa: E501 if self.local_vars_configuration.client_side_validation and lifecycle_status not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `lifecycle_status` ({0}), must be one of {1}" # noqa: E501 .format(lifecycle_status, allowed_values) ) self._lifecycle_status = lifecycle_status @property def certificate_status(self): """Gets the certificate_status of this ComputerCertificate. # noqa: E501 :return: The certificate_status of this ComputerCertificate. # noqa: E501 :rtype: str """ return self._certificate_status @certificate_status.setter def certificate_status(self, certificate_status): """Sets the certificate_status of this ComputerCertificate. :param certificate_status: The certificate_status of this ComputerCertificate. # noqa: E501 :type certificate_status: str """ allowed_values = ["EXPIRING", "EXPIRED", "REVOKED", "PENDING_REVOKE", "ISSUED"] # noqa: E501 if self.local_vars_configuration.client_side_validation and certificate_status not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `certificate_status` ({0}), must be one of {1}" # noqa: E501 .format(certificate_status, allowed_values) ) self._certificate_status = certificate_status def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerCertificate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerCertificate): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_content_caching_cache_detail.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerContentCachingCacheDetail(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'computer_content_caching_cache_details_id': 'str', 'category_name': 'str', 'disk_space_bytes_used': 'int' } attribute_map = { 'computer_content_caching_cache_details_id': 'computerContentCachingCacheDetailsId', 'category_name': 'categoryName', 'disk_space_bytes_used': 'diskSpaceBytesUsed' } def __init__(self, computer_content_caching_cache_details_id=None, category_name=None, disk_space_bytes_used=None, local_vars_configuration=None): # noqa: E501 """ComputerContentCachingCacheDetail - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._computer_content_caching_cache_details_id = None self._category_name = None self._disk_space_bytes_used = None self.discriminator = None if computer_content_caching_cache_details_id is not None: self.computer_content_caching_cache_details_id = computer_content_caching_cache_details_id if category_name is not None: self.category_name = category_name if disk_space_bytes_used is not None: self.disk_space_bytes_used = disk_space_bytes_used @property def computer_content_caching_cache_details_id(self): """Gets the computer_content_caching_cache_details_id of this ComputerContentCachingCacheDetail. # noqa: E501 :return: The computer_content_caching_cache_details_id of this ComputerContentCachingCacheDetail. # noqa: E501 :rtype: str """ return self._computer_content_caching_cache_details_id @computer_content_caching_cache_details_id.setter def computer_content_caching_cache_details_id(self, computer_content_caching_cache_details_id): """Sets the computer_content_caching_cache_details_id of this ComputerContentCachingCacheDetail. :param computer_content_caching_cache_details_id: The computer_content_caching_cache_details_id of this ComputerContentCachingCacheDetail. # noqa: E501 :type computer_content_caching_cache_details_id: str """ self._computer_content_caching_cache_details_id = computer_content_caching_cache_details_id @property def category_name(self): """Gets the category_name of this ComputerContentCachingCacheDetail. # noqa: E501 :return: The category_name of this ComputerContentCachingCacheDetail. # noqa: E501 :rtype: str """ return self._category_name @category_name.setter def category_name(self, category_name): """Sets the category_name of this ComputerContentCachingCacheDetail. :param category_name: The category_name of this ComputerContentCachingCacheDetail. # noqa: E501 :type category_name: str """ self._category_name = category_name @property def disk_space_bytes_used(self): """Gets the disk_space_bytes_used of this ComputerContentCachingCacheDetail. # noqa: E501 :return: The disk_space_bytes_used of this ComputerContentCachingCacheDetail. # noqa: E501 :rtype: int """ return self._disk_space_bytes_used @disk_space_bytes_used.setter def disk_space_bytes_used(self, disk_space_bytes_used): """Sets the disk_space_bytes_used of this ComputerContentCachingCacheDetail. :param disk_space_bytes_used: The disk_space_bytes_used of this ComputerContentCachingCacheDetail. 
# noqa: E501 :type disk_space_bytes_used: int """ self._disk_space_bytes_used = disk_space_bytes_used def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerContentCachingCacheDetail): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerContentCachingCacheDetail): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_content_caching_parent_local_network.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerContentCachingParentLocalNetwork(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'content_caching_parent_local_network_id': 'str', 'speed': 'int', 'wired': 'bool' } attribute_map = { 'content_caching_parent_local_network_id': 'contentCachingParentLocalNetworkId', 'speed': 'speed', 'wired': 'wired' } def __init__(self, content_caching_parent_local_network_id=None, speed=None, wired=None, local_vars_configuration=None): # noqa: E501 """ComputerContentCachingParentLocalNetwork - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._content_caching_parent_local_network_id = None self._speed = None self._wired = None self.discriminator = None if content_caching_parent_local_network_id is not None: self.content_caching_parent_local_network_id = content_caching_parent_local_network_id if speed is not None: self.speed = speed if wired is not None: self.wired = wired @property def content_caching_parent_local_network_id(self): """Gets the content_caching_parent_local_network_id of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :return: The content_caching_parent_local_network_id of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :rtype: str """ return self._content_caching_parent_local_network_id @content_caching_parent_local_network_id.setter def content_caching_parent_local_network_id(self, content_caching_parent_local_network_id): """Sets the content_caching_parent_local_network_id of this ComputerContentCachingParentLocalNetwork. :param content_caching_parent_local_network_id: The content_caching_parent_local_network_id of this ComputerContentCachingParentLocalNetwork. 
# noqa: E501 :type content_caching_parent_local_network_id: str """ self._content_caching_parent_local_network_id = content_caching_parent_local_network_id @property def speed(self): """Gets the speed of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :return: The speed of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :rtype: int """ return self._speed @speed.setter def speed(self, speed): """Sets the speed of this ComputerContentCachingParentLocalNetwork. :param speed: The speed of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :type speed: int """ self._speed = speed @property def wired(self): """Gets the wired of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :return: The wired of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :rtype: bool """ return self._wired @wired.setter def wired(self, wired): """Sets the wired of this ComputerContentCachingParentLocalNetwork. :param wired: The wired of this ComputerContentCachingParentLocalNetwork. # noqa: E501 :type wired: bool """ self._wired = wired def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerContentCachingParentLocalNetwork): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerContentCachingParentLocalNetwork): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_content_caching_parent.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerContentCachingParent(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'content_caching_parent_id': 'str', 'address': 'str', 'alerts': 'ComputerContentCachingParentAlert', 'details': 'ComputerContentCachingParentDetails', 'guid': 'str', 'healthy': 'bool', 'port': 'int', 'version': 'str' } attribute_map = { 'content_caching_parent_id': 'contentCachingParentId', 'address': 'address', 'alerts': 'alerts', 'details': 'details', 'guid': 'guid', 'healthy': 'healthy', 'port': 'port', 'version': 'version' } def __init__(self, content_caching_parent_id=None, address=None, alerts=None, details=None, guid=None, healthy=None, port=None, version=None, local_vars_configuration=None): # noqa: E501 """ComputerContentCachingParent - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._content_caching_parent_id = None self._address = None self._alerts = None self._details = None self._guid = None self._healthy = None self._port = None self._version = None self.discriminator = None if content_caching_parent_id is not None: self.content_caching_parent_id = content_caching_parent_id if address is not None: self.address = address if alerts is not None: self.alerts = alerts if details is not None: self.details = details if guid is not None: self.guid = guid if healthy is not None: self.healthy = healthy if port is not None: self.port = port if version is not None: self.version = version @property def content_caching_parent_id(self): """Gets the content_caching_parent_id of this ComputerContentCachingParent. # noqa: E501 :return: The content_caching_parent_id of this ComputerContentCachingParent. # noqa: E501 :rtype: str """ return self._content_caching_parent_id @content_caching_parent_id.setter def content_caching_parent_id(self, content_caching_parent_id): """Sets the content_caching_parent_id of this ComputerContentCachingParent. :param content_caching_parent_id: The content_caching_parent_id of this ComputerContentCachingParent. # noqa: E501 :type content_caching_parent_id: str """ self._content_caching_parent_id = content_caching_parent_id @property def address(self): """Gets the address of this ComputerContentCachingParent. # noqa: E501 :return: The address of this ComputerContentCachingParent. # noqa: E501 :rtype: str """ return self._address @address.setter def address(self, address): """Sets the address of this ComputerContentCachingParent. :param address: The address of this ComputerContentCachingParent. # noqa: E501 :type address: str """ self._address = address @property def alerts(self): """Gets the alerts of this ComputerContentCachingParent. # noqa: E501 :return: The alerts of this ComputerContentCachingParent. # noqa: E501 :rtype: ComputerContentCachingParentAlert """ return self._alerts @alerts.setter def alerts(self, alerts): """Sets the alerts of this ComputerContentCachingParent. :param alerts: The alerts of this ComputerContentCachingParent. # noqa: E501 :type alerts: ComputerContentCachingParentAlert """ self._alerts = alerts @property def details(self): """Gets the details of this ComputerContentCachingParent. # noqa: E501 :return: The details of this ComputerContentCachingParent. # noqa: E501 :rtype: ComputerContentCachingParentDetails """ return self._details @details.setter def details(self, details): """Sets the details of this ComputerContentCachingParent. :param details: The details of this ComputerContentCachingParent. 
# noqa: E501 :type details: ComputerContentCachingParentDetails """ self._details = details @property def guid(self): """Gets the guid of this ComputerContentCachingParent. # noqa: E501 :return: The guid of this ComputerContentCachingParent. # noqa: E501 :rtype: str """ return self._guid @guid.setter def guid(self, guid): """Sets the guid of this ComputerContentCachingParent. :param guid: The guid of this ComputerContentCachingParent. # noqa: E501 :type guid: str """ self._guid = guid @property def healthy(self): """Gets the healthy of this ComputerContentCachingParent. # noqa: E501 :return: The healthy of this ComputerContentCachingParent. # noqa: E501 :rtype: bool """ return self._healthy @healthy.setter def healthy(self, healthy): """Sets the healthy of this ComputerContentCachingParent. :param healthy: The healthy of this ComputerContentCachingParent. # noqa: E501 :type healthy: bool """ self._healthy = healthy @property def port(self): """Gets the port of this ComputerContentCachingParent. # noqa: E501 :return: The port of this ComputerContentCachingParent. # noqa: E501 :rtype: int """ return self._port @port.setter def port(self, port): """Sets the port of this ComputerContentCachingParent. :param port: The port of this ComputerContentCachingParent. # noqa: E501 :type port: int """ self._port = port @property def version(self): """Gets the version of this ComputerContentCachingParent. # noqa: E501 :return: The version of this ComputerContentCachingParent. # noqa: E501 :rtype: str """ return self._version @version.setter def version(self, version): """Sets the version of this ComputerContentCachingParent. :param version: The version of this ComputerContentCachingParent. # noqa: E501 :type version: str """ self._version = version def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerContentCachingParent): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerContentCachingParent): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_disk.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerDisk(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'id': 'str', 'device': 'str', 'model': 'str', 'revision': 'str', 'serial_number': 'str', 'size_megabytes': 'int', 'smart_status': 'str', 'type': 'str', 'partitions': 'list[ComputerPartition]' } attribute_map = { 'id': 'id', 'device': 'device', 'model': 'model', 'revision': 'revision', 'serial_number': 'serialNumber', 'size_megabytes': 'sizeMegabytes', 'smart_status': 'smartStatus', 'type': 'type', 'partitions': 'partitions' } def __init__(self, id=None, device=None, model=None, revision=None, serial_number=None, size_megabytes=None, smart_status=None, type=None, partitions=None, local_vars_configuration=None): # noqa: E501 """ComputerDisk - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._id = None self._device = None self._model = None self._revision = None self._serial_number = None self._size_megabytes = None self._smart_status = None self._type = None self._partitions = None self.discriminator = None if id is not None: self.id = id if device is not None: self.device = device if model is not None: self.model = model if revision is not None: self.revision = revision if serial_number is not None: self.serial_number = serial_number if size_megabytes is not None: self.size_megabytes = size_megabytes if smart_status is not None: self.smart_status = smart_status if type is not None: self.type = type if partitions is not None: self.partitions = partitions @property def id(self): """Gets the id of this ComputerDisk. # noqa: E501 :return: The id of this ComputerDisk. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this ComputerDisk. :param id: The id of this ComputerDisk. # noqa: E501 :type id: str """ self._id = id @property def device(self): """Gets the device of this ComputerDisk. # noqa: E501 :return: The device of this ComputerDisk. # noqa: E501 :rtype: str """ return self._device @device.setter def device(self, device): """Sets the device of this ComputerDisk. :param device: The device of this ComputerDisk. # noqa: E501 :type device: str """ self._device = device @property def model(self): """Gets the model of this ComputerDisk. # noqa: E501 :return: The model of this ComputerDisk. # noqa: E501 :rtype: str """ return self._model @model.setter def model(self, model): """Sets the model of this ComputerDisk. :param model: The model of this ComputerDisk. # noqa: E501 :type model: str """ self._model = model @property def revision(self): """Gets the revision of this ComputerDisk. # noqa: E501 :return: The revision of this ComputerDisk. # noqa: E501 :rtype: str """ return self._revision @revision.setter def revision(self, revision): """Sets the revision of this ComputerDisk. :param revision: The revision of this ComputerDisk. # noqa: E501 :type revision: str """ self._revision = revision @property def serial_number(self): """Gets the serial_number of this ComputerDisk. # noqa: E501 :return: The serial_number of this ComputerDisk. # noqa: E501 :rtype: str """ return self._serial_number @serial_number.setter def serial_number(self, serial_number): """Sets the serial_number of this ComputerDisk. :param serial_number: The serial_number of this ComputerDisk. # noqa: E501 :type serial_number: str """ self._serial_number = serial_number @property def size_megabytes(self): """Gets the size_megabytes of this ComputerDisk. # noqa: E501 Disk Size in MB. 
# noqa: E501 :return: The size_megabytes of this ComputerDisk. # noqa: E501 :rtype: int """ return self._size_megabytes @size_megabytes.setter def size_megabytes(self, size_megabytes): """Sets the size_megabytes of this ComputerDisk. Disk Size in MB. # noqa: E501 :param size_megabytes: The size_megabytes of this ComputerDisk. # noqa: E501 :type size_megabytes: int """ self._size_megabytes = size_megabytes @property def smart_status(self): """Gets the smart_status of this ComputerDisk. # noqa: E501 S.M.A.R.T Status # noqa: E501 :return: The smart_status of this ComputerDisk. # noqa: E501 :rtype: str """ return self._smart_status @smart_status.setter def smart_status(self, smart_status): """Sets the smart_status of this ComputerDisk. S.M.A.R.T Status # noqa: E501 :param smart_status: The smart_status of this ComputerDisk. # noqa: E501 :type smart_status: str """ self._smart_status = smart_status @property def type(self): """Gets the type of this ComputerDisk. # noqa: E501 Connection type attribute. # noqa: E501 :return: The type of this ComputerDisk. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this ComputerDisk. Connection type attribute. # noqa: E501 :param type: The type of this ComputerDisk. # noqa: E501 :type type: str """ self._type = type @property def partitions(self): """Gets the partitions of this ComputerDisk. # noqa: E501 :return: The partitions of this ComputerDisk. # noqa: E501 :rtype: list[ComputerPartition] """ return self._partitions @partitions.setter def partitions(self, partitions): """Sets the partitions of this ComputerDisk. :param partitions: The partitions of this ComputerDisk. # noqa: E501 :type partitions: list[ComputerPartition] """ self._partitions = partitions def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerDisk): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerDisk): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_extension_attribute.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerExtensionAttribute(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'definition_id': 'str', 'name': 'str', 'description': 'str', 'enabled': 'bool', 'multi_value': 'bool', 'values': 'list[str]', 'data_type': 'str', 'options': 'list[str]', 'input_type': 'str' } attribute_map = { 'definition_id': 'definitionId', 'name': 'name', 'description': 'description', 'enabled': 'enabled', 'multi_value': 'multiValue', 'values': 'values', 'data_type': 'dataType', 'options': 'options', 'input_type': 'inputType' } def __init__(self, definition_id=None, name=None, description=None, enabled=None, multi_value=None, values=None, data_type=None, options=None, input_type=None, local_vars_configuration=None): # noqa: E501 """ComputerExtensionAttribute - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._definition_id = None self._name = None self._description = None self._enabled = None self._multi_value = None self._values = None self._data_type = None self._options = None self._input_type = None self.discriminator = None if definition_id is not None: self.definition_id = definition_id if name is not None: self.name = name self.description = description if enabled is not None: self.enabled = enabled if multi_value is not None: self.multi_value = multi_value self.values = values self.data_type = data_type self.options = options self.input_type = input_type @property def definition_id(self): """Gets the definition_id of this ComputerExtensionAttribute. # noqa: E501 An identifier of extension attribute definition. # noqa: E501 :return: The definition_id of this ComputerExtensionAttribute. # noqa: E501 :rtype: str """ return self._definition_id @definition_id.setter def definition_id(self, definition_id): """Sets the definition_id of this ComputerExtensionAttribute. An identifier of extension attribute definition. # noqa: E501 :param definition_id: The definition_id of this ComputerExtensionAttribute. # noqa: E501 :type definition_id: str """ self._definition_id = definition_id @property def name(self): """Gets the name of this ComputerExtensionAttribute. # noqa: E501 A human-readable name by which attribute can be referred to. # noqa: E501 :return: The name of this ComputerExtensionAttribute. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this ComputerExtensionAttribute. A human-readable name by which attribute can be referred to. # noqa: E501 :param name: The name of this ComputerExtensionAttribute. # noqa: E501 :type name: str """ self._name = name @property def description(self): """Gets the description of this ComputerExtensionAttribute. # noqa: E501 An additional explanation of exact attribute meaning, possible values, etc. # noqa: E501 :return: The description of this ComputerExtensionAttribute. # noqa: E501 :rtype: str """ return self._description @description.setter def description(self, description): """Sets the description of this ComputerExtensionAttribute. An additional explanation of exact attribute meaning, possible values, etc. # noqa: E501 :param description: The description of this ComputerExtensionAttribute. # noqa: E501 :type description: str """ self._description = description @property def enabled(self): """Gets the enabled of this ComputerExtensionAttribute. # noqa: E501 :return: The enabled of this ComputerExtensionAttribute. 
# noqa: E501 :rtype: bool """ return self._enabled @enabled.setter def enabled(self, enabled): """Sets the enabled of this ComputerExtensionAttribute. :param enabled: The enabled of this ComputerExtensionAttribute. # noqa: E501 :type enabled: bool """ self._enabled = enabled @property def multi_value(self): """Gets the multi_value of this ComputerExtensionAttribute. # noqa: E501 :return: The multi_value of this ComputerExtensionAttribute. # noqa: E501 :rtype: bool """ return self._multi_value @multi_value.setter def multi_value(self, multi_value): """Sets the multi_value of this ComputerExtensionAttribute. :param multi_value: The multi_value of this ComputerExtensionAttribute. # noqa: E501 :type multi_value: bool """ self._multi_value = multi_value @property def values(self): """Gets the values of this ComputerExtensionAttribute. # noqa: E501 A value of extension attribute, in some rare cases there may be multiple values present, hence the array. # noqa: E501 :return: The values of this ComputerExtensionAttribute. # noqa: E501 :rtype: list[str] """ return self._values @values.setter def values(self, values): """Sets the values of this ComputerExtensionAttribute. A value of extension attribute, in some rare cases there may be multiple values present, hence the array. # noqa: E501 :param values: The values of this ComputerExtensionAttribute. # noqa: E501 :type values: list[str] """ self._values = values @property def data_type(self): """Gets the data_type of this ComputerExtensionAttribute. # noqa: E501 A data type of extension attribute. # noqa: E501 :return: The data_type of this ComputerExtensionAttribute. # noqa: E501 :rtype: str """ return self._data_type @data_type.setter def data_type(self, data_type): """Sets the data_type of this ComputerExtensionAttribute. A data type of extension attribute. # noqa: E501 :param data_type: The data_type of this ComputerExtensionAttribute. # noqa: E501 :type data_type: str """ allowed_values = [None,"STRING", "INTEGER", "DATE_TIME"] # noqa: E501 if self.local_vars_configuration.client_side_validation and data_type not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `data_type` ({0}), must be one of {1}" # noqa: E501 .format(data_type, allowed_values) ) self._data_type = data_type @property def options(self): """Gets the options of this ComputerExtensionAttribute. # noqa: E501 A closed list of possible values (applies to `popup` input type). # noqa: E501 :return: The options of this ComputerExtensionAttribute. # noqa: E501 :rtype: list[str] """ return self._options @options.setter def options(self, options): """Sets the options of this ComputerExtensionAttribute. A closed list of possible values (applies to `popup` input type). # noqa: E501 :param options: The options of this ComputerExtensionAttribute. # noqa: E501 :type options: list[str] """ self._options = options @property def input_type(self): """Gets the input_type of this ComputerExtensionAttribute. # noqa: E501 The input method. `text` is most common and means simply free text, `popup` is a closed list of values from which one or many can be selected and `script` value is calculated and can never be set directly. # noqa: E501 :return: The input_type of this ComputerExtensionAttribute. # noqa: E501 :rtype: str """ return self._input_type @input_type.setter def input_type(self, input_type): """Sets the input_type of this ComputerExtensionAttribute. The input method.
`text` is most common and means simply free text, `popup` is a closed list of values from which one or many can be selected and `script` value is calculated and can never be set directly. # noqa: E501 :param input_type: The input_type of this ComputerExtensionAttribute. # noqa: E501 :type input_type: str """ allowed_values = [None,"TEXT", "POPUP", "SCRIPT", "LDAP"] # noqa: E501 if self.local_vars_configuration.client_side_validation and input_type not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `input_type` ({0}), must be one of {1}" # noqa: E501 .format(input_type, allowed_values) ) self._input_type = input_type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerExtensionAttribute): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerExtensionAttribute): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_general_update.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerGeneralUpdate(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition.
""" openapi_types = { 'name': 'str', 'last_ip_address': 'str', 'barcode1': 'str', 'barcode2': 'str', 'asset_tag': 'str', 'extension_attributes': 'list[ComputerExtensionAttribute]' } attribute_map = { 'name': 'name', 'last_ip_address': 'lastIpAddress', 'barcode1': 'barcode1', 'barcode2': 'barcode2', 'asset_tag': 'assetTag', 'extension_attributes': 'extensionAttributes' } def __init__(self, name=None, last_ip_address=None, barcode1=None, barcode2=None, asset_tag=None, extension_attributes=None, local_vars_configuration=None): # noqa: E501 """ComputerGeneralUpdate - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._last_ip_address = None self._barcode1 = None self._barcode2 = None self._asset_tag = None self._extension_attributes = None self.discriminator = None if name is not None: self.name = name if last_ip_address is not None: self.last_ip_address = last_ip_address if barcode1 is not None: self.barcode1 = barcode1 if barcode2 is not None: self.barcode2 = barcode2 if asset_tag is not None: self.asset_tag = asset_tag if extension_attributes is not None: self.extension_attributes = extension_attributes @property def name(self): """Gets the name of this ComputerGeneralUpdate. # noqa: E501 :return: The name of this ComputerGeneralUpdate. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this ComputerGeneralUpdate. :param name: The name of this ComputerGeneralUpdate. # noqa: E501 :type name: str """ self._name = name @property def last_ip_address(self): """Gets the last_ip_address of this ComputerGeneralUpdate. # noqa: E501 :return: The last_ip_address of this ComputerGeneralUpdate. # noqa: E501 :rtype: str """ return self._last_ip_address @last_ip_address.setter def last_ip_address(self, last_ip_address): """Sets the last_ip_address of this ComputerGeneralUpdate. :param last_ip_address: The last_ip_address of this ComputerGeneralUpdate. # noqa: E501 :type last_ip_address: str """ self._last_ip_address = last_ip_address @property def barcode1(self): """Gets the barcode1 of this ComputerGeneralUpdate. # noqa: E501 :return: The barcode1 of this ComputerGeneralUpdate. # noqa: E501 :rtype: str """ return self._barcode1 @barcode1.setter def barcode1(self, barcode1): """Sets the barcode1 of this ComputerGeneralUpdate. :param barcode1: The barcode1 of this ComputerGeneralUpdate. # noqa: E501 :type barcode1: str """ self._barcode1 = barcode1 @property def barcode2(self): """Gets the barcode2 of this ComputerGeneralUpdate. # noqa: E501 :return: The barcode2 of this ComputerGeneralUpdate. # noqa: E501 :rtype: str """ return self._barcode2 @barcode2.setter def barcode2(self, barcode2): """Sets the barcode2 of this ComputerGeneralUpdate. :param barcode2: The barcode2 of this ComputerGeneralUpdate. # noqa: E501 :type barcode2: str """ self._barcode2 = barcode2 @property def asset_tag(self): """Gets the asset_tag of this ComputerGeneralUpdate. # noqa: E501 :return: The asset_tag of this ComputerGeneralUpdate. # noqa: E501 :rtype: str """ return self._asset_tag @asset_tag.setter def asset_tag(self, asset_tag): """Sets the asset_tag of this ComputerGeneralUpdate. :param asset_tag: The asset_tag of this ComputerGeneralUpdate. # noqa: E501 :type asset_tag: str """ self._asset_tag = asset_tag @property def extension_attributes(self): """Gets the extension_attributes of this ComputerGeneralUpdate. 
# noqa: E501 :return: The extension_attributes of this ComputerGeneralUpdate. # noqa: E501 :rtype: list[ComputerExtensionAttribute] """ return self._extension_attributes @extension_attributes.setter def extension_attributes(self, extension_attributes): """Sets the extension_attributes of this ComputerGeneralUpdate. :param extension_attributes: The extension_attributes of this ComputerGeneralUpdate. # noqa: E501 :type extension_attributes: list[ComputerExtensionAttribute] """ self._extension_attributes = extension_attributes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerGeneralUpdate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerGeneralUpdate): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/computer_inventory_update_request.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerInventoryUpdateRequest(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'udid': 'str', 'general': 'ComputerGeneralUpdate', 'purchasing': 'ComputerPurchase', 'user_and_location': 'ComputerUserAndLocation', 'hardware': 'ComputerHardwareUpdate', 'operating_system': 'ComputerOperatingSystemUpdate', 'extension_attributes': 'list[ComputerExtensionAttribute]' } attribute_map = { 'udid': 'udid', 'general': 'general', 'purchasing': 'purchasing', 'user_and_location': 'userAndLocation', 'hardware': 'hardware', 'operating_system': 'operatingSystem', 'extension_attributes': 'extensionAttributes' } def __init__(self, udid=None, general=None, purchasing=None, user_and_location=None, hardware=None, operating_system=None, extension_attributes=None, local_vars_configuration=None): # noqa: E501 """ComputerInventoryUpdateRequest - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._udid = None self._general = None self._purchasing = None self._user_and_location = None self._hardware = None self._operating_system = None self._extension_attributes = None self.discriminator = None if udid is not None: self.udid = udid if general is not None: self.general = general if purchasing is not None: self.purchasing = purchasing if user_and_location is not None: self.user_and_location = user_and_location if hardware is not None: self.hardware = hardware if operating_system is not None: self.operating_system = operating_system if extension_attributes is not None: self.extension_attributes = extension_attributes @property def udid(self): """Gets the udid of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The udid of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: str """ return self._udid @udid.setter def udid(self, udid): """Sets the udid of this ComputerInventoryUpdateRequest. :param udid: The udid of this ComputerInventoryUpdateRequest. # noqa: E501 :type udid: str """ self._udid = udid @property def general(self): """Gets the general of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The general of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: ComputerGeneralUpdate """ return self._general @general.setter def general(self, general): """Sets the general of this ComputerInventoryUpdateRequest. :param general: The general of this ComputerInventoryUpdateRequest. # noqa: E501 :type general: ComputerGeneralUpdate """ self._general = general @property def purchasing(self): """Gets the purchasing of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The purchasing of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: ComputerPurchase """ return self._purchasing @purchasing.setter def purchasing(self, purchasing): """Sets the purchasing of this ComputerInventoryUpdateRequest. :param purchasing: The purchasing of this ComputerInventoryUpdateRequest. # noqa: E501 :type purchasing: ComputerPurchase """ self._purchasing = purchasing @property def user_and_location(self): """Gets the user_and_location of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The user_and_location of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: ComputerUserAndLocation """ return self._user_and_location @user_and_location.setter def user_and_location(self, user_and_location): """Sets the user_and_location of this ComputerInventoryUpdateRequest. :param user_and_location: The user_and_location of this ComputerInventoryUpdateRequest. 
# noqa: E501 :type user_and_location: ComputerUserAndLocation """ self._user_and_location = user_and_location @property def hardware(self): """Gets the hardware of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The hardware of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: ComputerHardwareUpdate """ return self._hardware @hardware.setter def hardware(self, hardware): """Sets the hardware of this ComputerInventoryUpdateRequest. :param hardware: The hardware of this ComputerInventoryUpdateRequest. # noqa: E501 :type hardware: ComputerHardwareUpdate """ self._hardware = hardware @property def operating_system(self): """Gets the operating_system of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The operating_system of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: ComputerOperatingSystemUpdate """ return self._operating_system @operating_system.setter def operating_system(self, operating_system): """Sets the operating_system of this ComputerInventoryUpdateRequest. :param operating_system: The operating_system of this ComputerInventoryUpdateRequest. # noqa: E501 :type operating_system: ComputerOperatingSystemUpdate """ self._operating_system = operating_system @property def extension_attributes(self): """Gets the extension_attributes of this ComputerInventoryUpdateRequest. # noqa: E501 :return: The extension_attributes of this ComputerInventoryUpdateRequest. # noqa: E501 :rtype: list[ComputerExtensionAttribute] """ return self._extension_attributes @extension_attributes.setter def extension_attributes(self, extension_attributes): """Sets the extension_attributes of this ComputerInventoryUpdateRequest. :param extension_attributes: The extension_attributes of this ComputerInventoryUpdateRequest. # noqa: E501 :type extension_attributes: list[ComputerExtensionAttribute] """ self._extension_attributes = extension_attributes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerInventoryUpdateRequest): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerInventoryUpdateRequest): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/device_enrollment_token.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class DeviceEnrollmentToken(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'token_file_name': 'str', 'encoded_token': 'str' } attribute_map = { 'token_file_name': 'tokenFileName', 'encoded_token': 'encodedToken' } def __init__(self, token_file_name=None, encoded_token=None, local_vars_configuration=None): # noqa: E501 """DeviceEnrollmentToken - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._token_file_name = None self._encoded_token = None self.discriminator = None if token_file_name is not None: self.token_file_name = token_file_name if encoded_token is not None: self.encoded_token = encoded_token @property def token_file_name(self): """Gets the token_file_name of this DeviceEnrollmentToken. # noqa: E501 Optional name of the token to be saved, if no name is provided one will be auto-generated # noqa: E501 :return: The token_file_name of this DeviceEnrollmentToken. # noqa: E501 :rtype: str """ return self._token_file_name @token_file_name.setter def token_file_name(self, token_file_name): """Sets the token_file_name of this DeviceEnrollmentToken. Optional name of the token to be saved, if no name is provided one will be auto-generated # noqa: E501 :param token_file_name: The token_file_name of this DeviceEnrollmentToken. # noqa: E501 :type token_file_name: str """ self._token_file_name = token_file_name @property def encoded_token(self): """Gets the encoded_token of this DeviceEnrollmentToken. # noqa: E501 The base 64 encoded token # noqa: E501 :return: The encoded_token of this DeviceEnrollmentToken. # noqa: E501 :rtype: str """ return self._encoded_token @encoded_token.setter def encoded_token(self, encoded_token): """Sets the encoded_token of this DeviceEnrollmentToken. The base 64 encoded token # noqa: E501 :param encoded_token: The encoded_token of this DeviceEnrollmentToken. 
# noqa: E501 :type encoded_token: str """ if (self.local_vars_configuration.client_side_validation and encoded_token is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', encoded_token)): # noqa: E501 raise ValueError(r"Invalid value for `encoded_token`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501 self._encoded_token = encoded_token def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DeviceEnrollmentToken): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, DeviceEnrollmentToken): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/ebook.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class Ebook(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'id': 'str', 'name': 'str', 'kind': 'str', 'url': 'str', 'free': 'bool', 'version': 'str', 'author': 'str', 'deploy_as_managed': 'bool', 'install_automatically': 'bool', 'category_id': 'str', 'site_id': 'str' } attribute_map = { 'id': 'id', 'name': 'name', 'kind': 'kind', 'url': 'url', 'free': 'free', 'version': 'version', 'author': 'author', 'deploy_as_managed': 'deployAsManaged', 'install_automatically': 'installAutomatically', 'category_id': 'categoryId', 'site_id': 'siteId' } def __init__(self, id=None, name=None, kind=None, url=None, free=None, version=None, author=None, deploy_as_managed=None, install_automatically=None, category_id=None, site_id=None, local_vars_configuration=None): # noqa: E501 """Ebook - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._id = None self._name = None self._kind = None self._url = None self._free = None self._version = None self._author = None self._deploy_as_managed = None self._install_automatically = None self._category_id = None self._site_id = None self.discriminator = None if id is not None: self.id = id if name is not None: self.name = name if kind is not None: self.kind = kind if url is not None: self.url = url if free is not None: self.free = free if version is not None: self.version = version if author is not None: self.author = author if deploy_as_managed is not None: self.deploy_as_managed = deploy_as_managed if install_automatically is not None: self.install_automatically = install_automatically if category_id is not None: self.category_id = category_id if site_id is not None: self.site_id = site_id @property def id(self): """Gets the id of this Ebook. # noqa: E501 :return: The id of this Ebook. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this Ebook. :param id: The id of this Ebook. # noqa: E501 :type id: str """ self._id = id @property def name(self): """Gets the name of this Ebook. # noqa: E501 :return: The name of this Ebook. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this Ebook. :param name: The name of this Ebook. # noqa: E501 :type name: str """ self._name = name @property def kind(self): """Gets the kind of this Ebook. # noqa: E501 :return: The kind of this Ebook. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this Ebook. :param kind: The kind of this Ebook. # noqa: E501 :type kind: str """ allowed_values = ["UNKNOWN", "PDF", "EPUB", "IBOOKS"] # noqa: E501 if self.local_vars_configuration.client_side_validation and kind not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `kind` ({0}), must be one of {1}" # noqa: E501 .format(kind, allowed_values) ) self._kind = kind @property def url(self): """Gets the url of this Ebook. # noqa: E501 :return: The url of this Ebook. # noqa: E501 :rtype: str """ return self._url @url.setter def url(self, url): """Sets the url of this Ebook. :param url: The url of this Ebook. # noqa: E501 :type url: str """ self._url = url @property def free(self): """Gets the free of this Ebook. # noqa: E501 :return: The free of this Ebook. # noqa: E501 :rtype: bool """ return self._free @free.setter def free(self, free): """Sets the free of this Ebook. :param free: The free of this Ebook. 
# noqa: E501 :type free: bool """ self._free = free @property def version(self): """Gets the version of this Ebook. # noqa: E501 :return: The version of this Ebook. # noqa: E501 :rtype: str """ return self._version @version.setter def version(self, version): """Sets the version of this Ebook. :param version: The version of this Ebook. # noqa: E501 :type version: str """ self._version = version @property def author(self): """Gets the author of this Ebook. # noqa: E501 :return: The author of this Ebook. # noqa: E501 :rtype: str """ return self._author @author.setter def author(self, author): """Sets the author of this Ebook. :param author: The author of this Ebook. # noqa: E501 :type author: str """ self._author = author @property def deploy_as_managed(self): """Gets the deploy_as_managed of this Ebook. # noqa: E501 If true, it will be automatically installed # noqa: E501 :return: The deploy_as_managed of this Ebook. # noqa: E501 :rtype: bool """ return self._deploy_as_managed @deploy_as_managed.setter def deploy_as_managed(self, deploy_as_managed): """Sets the deploy_as_managed of this Ebook. If true, it will be automatically installed # noqa: E501 :param deploy_as_managed: The deploy_as_managed of this Ebook. # noqa: E501 :type deploy_as_managed: bool """ self._deploy_as_managed = deploy_as_managed @property def install_automatically(self): """Gets the install_automatically of this Ebook. # noqa: E501 :return: The install_automatically of this Ebook. # noqa: E501 :rtype: bool """ return self._install_automatically @install_automatically.setter def install_automatically(self, install_automatically): """Sets the install_automatically of this Ebook. :param install_automatically: The install_automatically of this Ebook. # noqa: E501 :type install_automatically: bool """ self._install_automatically = install_automatically @property def category_id(self): """Gets the category_id of this Ebook. # noqa: E501 :return: The category_id of this Ebook. # noqa: E501 :rtype: str """ return self._category_id @category_id.setter def category_id(self, category_id): """Sets the category_id of this Ebook. :param category_id: The category_id of this Ebook. # noqa: E501 :type category_id: str """ self._category_id = category_id @property def site_id(self): """Gets the site_id of this Ebook. # noqa: E501 :return: The site_id of this Ebook. # noqa: E501 :rtype: str """ return self._site_id @site_id.setter def site_id(self, site_id): """Sets the site_id of this Ebook. :param site_id: The site_id of this Ebook. 
# noqa: E501 :type site_id: str """ self._site_id = site_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Ebook): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, Ebook): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/ebook_scope.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class EbookScope(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'all_computers': 'bool', 'all_mobile_devices': 'bool', 'all_users': 'bool', 'computer_ids': 'list[str]', 'computer_group_ids': 'list[str]', 'mobile_device_ids': 'list[str]', 'mobile_device_group_ids': 'list[str]', 'building_ids': 'list[str]', 'department_ids': 'list[str]', 'user_ids': 'list[str]', 'user_group_ids': 'list[str]', 'classroom_ids': 'list[str]', 'limitations': 'EbookLimitations', 'exclusions': 'EbookExclusions' } attribute_map = { 'all_computers': 'allComputers', 'all_mobile_devices': 'allMobileDevices', 'all_users': 'allUsers', 'computer_ids': 'computerIds', 'computer_group_ids': 'computerGroupIds', 'mobile_device_ids': 'mobileDeviceIds', 'mobile_device_group_ids': 'mobileDeviceGroupIds', 'building_ids': 'buildingIds', 'department_ids': 'departmentIds', 'user_ids': 'userIds', 'user_group_ids': 'userGroupIds', 'classroom_ids': 'classroomIds', 'limitations': 'limitations', 'exclusions': 'exclusions' } def __init__(self, all_computers=None, all_mobile_devices=None, all_users=None, computer_ids=None, computer_group_ids=None, mobile_device_ids=None, mobile_device_group_ids=None, building_ids=None, department_ids=None, user_ids=None, user_group_ids=None, classroom_ids=None, limitations=None, exclusions=None, local_vars_configuration=None): # noqa: E501 """EbookScope - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._all_computers = None self._all_mobile_devices = None self._all_users = None self._computer_ids = None self._computer_group_ids = None self._mobile_device_ids = None self._mobile_device_group_ids = None self._building_ids = None self._department_ids = None self._user_ids = None self._user_group_ids = None self._classroom_ids = None self._limitations = None self._exclusions = None self.discriminator = None if all_computers is not None: self.all_computers = all_computers 
if all_mobile_devices is not None: self.all_mobile_devices = all_mobile_devices if all_users is not None: self.all_users = all_users if computer_ids is not None: self.computer_ids = computer_ids if computer_group_ids is not None: self.computer_group_ids = computer_group_ids if mobile_device_ids is not None: self.mobile_device_ids = mobile_device_ids if mobile_device_group_ids is not None: self.mobile_device_group_ids = mobile_device_group_ids if building_ids is not None: self.building_ids = building_ids if department_ids is not None: self.department_ids = department_ids if user_ids is not None: self.user_ids = user_ids if user_group_ids is not None: self.user_group_ids = user_group_ids if classroom_ids is not None: self.classroom_ids = classroom_ids if limitations is not None: self.limitations = limitations if exclusions is not None: self.exclusions = exclusions @property def all_computers(self): """Gets the all_computers of this EbookScope. # noqa: E501 :return: The all_computers of this EbookScope. # noqa: E501 :rtype: bool """ return self._all_computers @all_computers.setter def all_computers(self, all_computers): """Sets the all_computers of this EbookScope. :param all_computers: The all_computers of this EbookScope. # noqa: E501 :type all_computers: bool """ self._all_computers = all_computers @property def all_mobile_devices(self): """Gets the all_mobile_devices of this EbookScope. # noqa: E501 :return: The all_mobile_devices of this EbookScope. # noqa: E501 :rtype: bool """ return self._all_mobile_devices @all_mobile_devices.setter def all_mobile_devices(self, all_mobile_devices): """Sets the all_mobile_devices of this EbookScope. :param all_mobile_devices: The all_mobile_devices of this EbookScope. # noqa: E501 :type all_mobile_devices: bool """ self._all_mobile_devices = all_mobile_devices @property def all_users(self): """Gets the all_users of this EbookScope. # noqa: E501 :return: The all_users of this EbookScope. # noqa: E501 :rtype: bool """ return self._all_users @all_users.setter def all_users(self, all_users): """Sets the all_users of this EbookScope. :param all_users: The all_users of this EbookScope. # noqa: E501 :type all_users: bool """ self._all_users = all_users @property def computer_ids(self): """Gets the computer_ids of this EbookScope. # noqa: E501 :return: The computer_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._computer_ids @computer_ids.setter def computer_ids(self, computer_ids): """Sets the computer_ids of this EbookScope. :param computer_ids: The computer_ids of this EbookScope. # noqa: E501 :type computer_ids: list[str] """ self._computer_ids = computer_ids @property def computer_group_ids(self): """Gets the computer_group_ids of this EbookScope. # noqa: E501 :return: The computer_group_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._computer_group_ids @computer_group_ids.setter def computer_group_ids(self, computer_group_ids): """Sets the computer_group_ids of this EbookScope. :param computer_group_ids: The computer_group_ids of this EbookScope. # noqa: E501 :type computer_group_ids: list[str] """ self._computer_group_ids = computer_group_ids @property def mobile_device_ids(self): """Gets the mobile_device_ids of this EbookScope. # noqa: E501 :return: The mobile_device_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._mobile_device_ids @mobile_device_ids.setter def mobile_device_ids(self, mobile_device_ids): """Sets the mobile_device_ids of this EbookScope. 
:param mobile_device_ids: The mobile_device_ids of this EbookScope. # noqa: E501 :type mobile_device_ids: list[str] """ self._mobile_device_ids = mobile_device_ids @property def mobile_device_group_ids(self): """Gets the mobile_device_group_ids of this EbookScope. # noqa: E501 :return: The mobile_device_group_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._mobile_device_group_ids @mobile_device_group_ids.setter def mobile_device_group_ids(self, mobile_device_group_ids): """Sets the mobile_device_group_ids of this EbookScope. :param mobile_device_group_ids: The mobile_device_group_ids of this EbookScope. # noqa: E501 :type mobile_device_group_ids: list[str] """ self._mobile_device_group_ids = mobile_device_group_ids @property def building_ids(self): """Gets the building_ids of this EbookScope. # noqa: E501 :return: The building_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._building_ids @building_ids.setter def building_ids(self, building_ids): """Sets the building_ids of this EbookScope. :param building_ids: The building_ids of this EbookScope. # noqa: E501 :type building_ids: list[str] """ self._building_ids = building_ids @property def department_ids(self): """Gets the department_ids of this EbookScope. # noqa: E501 :return: The department_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._department_ids @department_ids.setter def department_ids(self, department_ids): """Sets the department_ids of this EbookScope. :param department_ids: The department_ids of this EbookScope. # noqa: E501 :type department_ids: list[str] """ self._department_ids = department_ids @property def user_ids(self): """Gets the user_ids of this EbookScope. # noqa: E501 :return: The user_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._user_ids @user_ids.setter def user_ids(self, user_ids): """Sets the user_ids of this EbookScope. :param user_ids: The user_ids of this EbookScope. # noqa: E501 :type user_ids: list[str] """ self._user_ids = user_ids @property def user_group_ids(self): """Gets the user_group_ids of this EbookScope. # noqa: E501 :return: The user_group_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._user_group_ids @user_group_ids.setter def user_group_ids(self, user_group_ids): """Sets the user_group_ids of this EbookScope. :param user_group_ids: The user_group_ids of this EbookScope. # noqa: E501 :type user_group_ids: list[str] """ self._user_group_ids = user_group_ids @property def classroom_ids(self): """Gets the classroom_ids of this EbookScope. # noqa: E501 :return: The classroom_ids of this EbookScope. # noqa: E501 :rtype: list[str] """ return self._classroom_ids @classroom_ids.setter def classroom_ids(self, classroom_ids): """Sets the classroom_ids of this EbookScope. :param classroom_ids: The classroom_ids of this EbookScope. # noqa: E501 :type classroom_ids: list[str] """ self._classroom_ids = classroom_ids @property def limitations(self): """Gets the limitations of this EbookScope. # noqa: E501 :return: The limitations of this EbookScope. # noqa: E501 :rtype: EbookLimitations """ return self._limitations @limitations.setter def limitations(self, limitations): """Sets the limitations of this EbookScope. :param limitations: The limitations of this EbookScope. # noqa: E501 :type limitations: EbookLimitations """ self._limitations = limitations @property def exclusions(self): """Gets the exclusions of this EbookScope. # noqa: E501 :return: The exclusions of this EbookScope. 
# noqa: E501 :rtype: EbookExclusions """ return self._exclusions @exclusions.setter def exclusions(self, exclusions): """Sets the exclusions of this EbookScope. :param exclusions: The exclusions of this EbookScope. # noqa: E501 :type exclusions: EbookExclusions """ self._exclusions = exclusions def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, EbookScope): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, EbookScope): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/enrollment_customization_panel_text_all_of.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class EnrollmentCustomizationPanelTextAllOf(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'body': 'str', 'subtext': 'str', 'title': 'str', 'back_button_text': 'str', 'continue_button_text': 'str' } attribute_map = { 'body': 'body', 'subtext': 'subtext', 'title': 'title', 'back_button_text': 'backButtonText', 'continue_button_text': 'continueButtonText' } def __init__(self, body=None, subtext=None, title=None, back_button_text=None, continue_button_text=None, local_vars_configuration=None): # noqa: E501 """EnrollmentCustomizationPanelTextAllOf - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._body = None self._subtext = None self._title = None self._back_button_text = None self._continue_button_text = None self.discriminator = None self.body = body if subtext is not None: self.subtext = subtext self.title = title self.back_button_text = back_button_text self.continue_button_text = continue_button_text @property def body(self): """Gets the body of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :return: The body of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :rtype: str """ return self._body @body.setter def body(self, body): """Sets the body of this EnrollmentCustomizationPanelTextAllOf. :param body: The body of this EnrollmentCustomizationPanelTextAllOf. 
# noqa: E501 :type body: str """ if self.local_vars_configuration.client_side_validation and body is None: # noqa: E501 raise ValueError("Invalid value for `body`, must not be `None`") # noqa: E501 self._body = body @property def subtext(self): """Gets the subtext of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :return: The subtext of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :rtype: str """ return self._subtext @subtext.setter def subtext(self, subtext): """Sets the subtext of this EnrollmentCustomizationPanelTextAllOf. :param subtext: The subtext of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :type subtext: str """ self._subtext = subtext @property def title(self): """Gets the title of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :return: The title of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :rtype: str """ return self._title @title.setter def title(self, title): """Sets the title of this EnrollmentCustomizationPanelTextAllOf. :param title: The title of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :type title: str """ if self.local_vars_configuration.client_side_validation and title is None: # noqa: E501 raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501 self._title = title @property def back_button_text(self): """Gets the back_button_text of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :return: The back_button_text of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :rtype: str """ return self._back_button_text @back_button_text.setter def back_button_text(self, back_button_text): """Sets the back_button_text of this EnrollmentCustomizationPanelTextAllOf. :param back_button_text: The back_button_text of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :type back_button_text: str """ if self.local_vars_configuration.client_side_validation and back_button_text is None: # noqa: E501 raise ValueError("Invalid value for `back_button_text`, must not be `None`") # noqa: E501 self._back_button_text = back_button_text @property def continue_button_text(self): """Gets the continue_button_text of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :return: The continue_button_text of this EnrollmentCustomizationPanelTextAllOf. # noqa: E501 :rtype: str """ return self._continue_button_text @continue_button_text.setter def continue_button_text(self, continue_button_text): """Sets the continue_button_text of this EnrollmentCustomizationPanelTextAllOf. :param continue_button_text: The continue_button_text of this EnrollmentCustomizationPanelTextAllOf. 
# noqa: E501 :type continue_button_text: str """ if self.local_vars_configuration.client_side_validation and continue_button_text is None: # noqa: E501 raise ValueError("Invalid value for `continue_button_text`, must not be `None`") # noqa: E501 self._continue_button_text = continue_button_text def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, EnrollmentCustomizationPanelTextAllOf): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, EnrollmentCustomizationPanelTextAllOf): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/location_v2.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class LocationV2(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'username': 'str', 'real_name': 'str', 'email_address': 'str', 'position': 'str', 'phone_number': 'str', 'department_id': 'str', 'building_id': 'str', 'room': 'str' } attribute_map = { 'username': 'username', 'real_name': 'realName', 'email_address': 'emailAddress', 'position': 'position', 'phone_number': 'phoneNumber', 'department_id': 'departmentId', 'building_id': 'buildingId', 'room': 'room' } def __init__(self, username=None, real_name=None, email_address=None, position=None, phone_number=None, department_id=None, building_id=None, room=None, local_vars_configuration=None): # noqa: E501 """LocationV2 - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._username = None self._real_name = None self._email_address = None self._position = None self._phone_number = None self._department_id = None self._building_id = None self._room = None self.discriminator = None if username is not None: self.username = username if real_name is not None: self.real_name = real_name if email_address is not None: self.email_address = email_address if position is not None: self.position = position if phone_number is not None: self.phone_number = phone_number if department_id is not None: self.department_id = department_id if building_id is not None: self.building_id = building_id if room is not None: self.room = room @property def username(self): """Gets the username of this LocationV2. # noqa: E501 :return: The username of this LocationV2. 
# noqa: E501 :rtype: str """ return self._username @username.setter def username(self, username): """Sets the username of this LocationV2. :param username: The username of this LocationV2. # noqa: E501 :type username: str """ self._username = username @property def real_name(self): """Gets the real_name of this LocationV2. # noqa: E501 :return: The real_name of this LocationV2. # noqa: E501 :rtype: str """ return self._real_name @real_name.setter def real_name(self, real_name): """Sets the real_name of this LocationV2. :param real_name: The real_name of this LocationV2. # noqa: E501 :type real_name: str """ self._real_name = real_name @property def email_address(self): """Gets the email_address of this LocationV2. # noqa: E501 :return: The email_address of this LocationV2. # noqa: E501 :rtype: str """ return self._email_address @email_address.setter def email_address(self, email_address): """Sets the email_address of this LocationV2. :param email_address: The email_address of this LocationV2. # noqa: E501 :type email_address: str """ self._email_address = email_address @property def position(self): """Gets the position of this LocationV2. # noqa: E501 :return: The position of this LocationV2. # noqa: E501 :rtype: str """ return self._position @position.setter def position(self, position): """Sets the position of this LocationV2. :param position: The position of this LocationV2. # noqa: E501 :type position: str """ self._position = position @property def phone_number(self): """Gets the phone_number of this LocationV2. # noqa: E501 :return: The phone_number of this LocationV2. # noqa: E501 :rtype: str """ return self._phone_number @phone_number.setter def phone_number(self, phone_number): """Sets the phone_number of this LocationV2. :param phone_number: The phone_number of this LocationV2. # noqa: E501 :type phone_number: str """ self._phone_number = phone_number @property def department_id(self): """Gets the department_id of this LocationV2. # noqa: E501 :return: The department_id of this LocationV2. # noqa: E501 :rtype: str """ return self._department_id @department_id.setter def department_id(self, department_id): """Sets the department_id of this LocationV2. :param department_id: The department_id of this LocationV2. # noqa: E501 :type department_id: str """ self._department_id = department_id @property def building_id(self): """Gets the building_id of this LocationV2. # noqa: E501 :return: The building_id of this LocationV2. # noqa: E501 :rtype: str """ return self._building_id @building_id.setter def building_id(self, building_id): """Sets the building_id of this LocationV2. :param building_id: The building_id of this LocationV2. # noqa: E501 :type building_id: str """ self._building_id = building_id @property def room(self): """Gets the room of this LocationV2. # noqa: E501 :return: The room of this LocationV2. # noqa: E501 :rtype: str """ return self._room @room.setter def room(self, room): """Sets the room of this LocationV2. :param room: The room of this LocationV2. 
# noqa: E501 :type room: str """ self._room = room def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, LocationV2): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, LocationV2): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/mobile_device_prestage_names_v2.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class MobileDevicePrestageNamesV2(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'assign_names_using': 'str', 'prestage_device_names': 'list[MobileDevicePrestageNameV2]', 'device_name_prefix': 'str', 'device_name_suffix': 'str', 'single_device_name': 'str', 'manage_names': 'bool', 'device_naming_configured': 'bool' } attribute_map = { 'assign_names_using': 'assignNamesUsing', 'prestage_device_names': 'prestageDeviceNames', 'device_name_prefix': 'deviceNamePrefix', 'device_name_suffix': 'deviceNameSuffix', 'single_device_name': 'singleDeviceName', 'manage_names': 'manageNames', 'device_naming_configured': 'deviceNamingConfigured' } def __init__(self, assign_names_using=None, prestage_device_names=None, device_name_prefix=None, device_name_suffix=None, single_device_name=None, manage_names=None, device_naming_configured=None, local_vars_configuration=None): # noqa: E501 """MobileDevicePrestageNamesV2 - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._assign_names_using = None self._prestage_device_names = None self._device_name_prefix = None self._device_name_suffix = None self._single_device_name = None self._manage_names = None self._device_naming_configured = None self.discriminator = None if assign_names_using is not None: self.assign_names_using = assign_names_using if prestage_device_names is not None: self.prestage_device_names = prestage_device_names if device_name_prefix is not None: self.device_name_prefix = device_name_prefix if device_name_suffix is not None: self.device_name_suffix = device_name_suffix if single_device_name is not None: self.single_device_name = single_device_name if manage_names is not None: self.manage_names = manage_names if device_naming_configured is not None: self.device_naming_configured = device_naming_configured @property def assign_names_using(self): """Gets the assign_names_using of this 
MobileDevicePrestageNamesV2. # noqa: E501 :return: The assign_names_using of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: str """ return self._assign_names_using @assign_names_using.setter def assign_names_using(self, assign_names_using): """Sets the assign_names_using of this MobileDevicePrestageNamesV2. :param assign_names_using: The assign_names_using of this MobileDevicePrestageNamesV2. # noqa: E501 :type assign_names_using: str """ self._assign_names_using = assign_names_using @property def prestage_device_names(self): """Gets the prestage_device_names of this MobileDevicePrestageNamesV2. # noqa: E501 :return: The prestage_device_names of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: list[MobileDevicePrestageNameV2] """ return self._prestage_device_names @prestage_device_names.setter def prestage_device_names(self, prestage_device_names): """Sets the prestage_device_names of this MobileDevicePrestageNamesV2. :param prestage_device_names: The prestage_device_names of this MobileDevicePrestageNamesV2. # noqa: E501 :type prestage_device_names: list[MobileDevicePrestageNameV2] """ self._prestage_device_names = prestage_device_names @property def device_name_prefix(self): """Gets the device_name_prefix of this MobileDevicePrestageNamesV2. # noqa: E501 :return: The device_name_prefix of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: str """ return self._device_name_prefix @device_name_prefix.setter def device_name_prefix(self, device_name_prefix): """Sets the device_name_prefix of this MobileDevicePrestageNamesV2. :param device_name_prefix: The device_name_prefix of this MobileDevicePrestageNamesV2. # noqa: E501 :type device_name_prefix: str """ self._device_name_prefix = device_name_prefix @property def device_name_suffix(self): """Gets the device_name_suffix of this MobileDevicePrestageNamesV2. # noqa: E501 :return: The device_name_suffix of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: str """ return self._device_name_suffix @device_name_suffix.setter def device_name_suffix(self, device_name_suffix): """Sets the device_name_suffix of this MobileDevicePrestageNamesV2. :param device_name_suffix: The device_name_suffix of this MobileDevicePrestageNamesV2. # noqa: E501 :type device_name_suffix: str """ self._device_name_suffix = device_name_suffix @property def single_device_name(self): """Gets the single_device_name of this MobileDevicePrestageNamesV2. # noqa: E501 :return: The single_device_name of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: str """ return self._single_device_name @single_device_name.setter def single_device_name(self, single_device_name): """Sets the single_device_name of this MobileDevicePrestageNamesV2. :param single_device_name: The single_device_name of this MobileDevicePrestageNamesV2. # noqa: E501 :type single_device_name: str """ self._single_device_name = single_device_name @property def manage_names(self): """Gets the manage_names of this MobileDevicePrestageNamesV2. # noqa: E501 :return: The manage_names of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: bool """ return self._manage_names @manage_names.setter def manage_names(self, manage_names): """Sets the manage_names of this MobileDevicePrestageNamesV2. :param manage_names: The manage_names of this MobileDevicePrestageNamesV2. # noqa: E501 :type manage_names: bool """ self._manage_names = manage_names @property def device_naming_configured(self): """Gets the device_naming_configured of this MobileDevicePrestageNamesV2. 
# noqa: E501 :return: The device_naming_configured of this MobileDevicePrestageNamesV2. # noqa: E501 :rtype: bool """ return self._device_naming_configured @device_naming_configured.setter def device_naming_configured(self, device_naming_configured): """Sets the device_naming_configured of this MobileDevicePrestageNamesV2. :param device_naming_configured: The device_naming_configured of this MobileDevicePrestageNamesV2. # noqa: E501 :type device_naming_configured: bool """ self._device_naming_configured = device_naming_configured def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, MobileDevicePrestageNamesV2): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, MobileDevicePrestageNamesV2): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/order_by.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class OrderBy(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'field': 'str', 'direction': 'str' } attribute_map = { 'field': 'field', 'direction': 'direction' } def __init__(self, field=None, direction=None, local_vars_configuration=None): # noqa: E501 """OrderBy - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._field = None self._direction = None self.discriminator = None if field is not None: self.field = field if direction is not None: self.direction = direction @property def field(self): """Gets the field of this OrderBy. # noqa: E501 :return: The field of this OrderBy. # noqa: E501 :rtype: str """ return self._field @field.setter def field(self, field): """Sets the field of this OrderBy. :param field: The field of this OrderBy. # noqa: E501 :type field: str """ self._field = field @property def direction(self): """Gets the direction of this OrderBy. # noqa: E501 :return: The direction of this OrderBy. # noqa: E501 :rtype: str """ return self._direction @direction.setter def direction(self, direction): """Sets the direction of this OrderBy. :param direction: The direction of this OrderBy. 
# noqa: E501 :type direction: str """ allowed_values = ["ASC", "DESC"] # noqa: E501 if self.local_vars_configuration.client_side_validation and direction not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `direction` ({0}), must be one of {1}" # noqa: E501 .format(direction, allowed_values) ) self._direction = direction def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, OrderBy): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, OrderBy): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/reenrollment.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class Reenrollment(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'is_flush_policy_history_enabled': 'bool', 'is_flush_location_information_enabled': 'bool', 'is_flush_location_information_history_enabled': 'bool', 'is_flush_extension_attributes_enabled': 'bool', 'flush_mdm_queue': 'str' } attribute_map = { 'is_flush_policy_history_enabled': 'isFlushPolicyHistoryEnabled', 'is_flush_location_information_enabled': 'isFlushLocationInformationEnabled', 'is_flush_location_information_history_enabled': 'isFlushLocationInformationHistoryEnabled', 'is_flush_extension_attributes_enabled': 'isFlushExtensionAttributesEnabled', 'flush_mdm_queue': 'flushMDMQueue' } def __init__(self, is_flush_policy_history_enabled=False, is_flush_location_information_enabled=False, is_flush_location_information_history_enabled=False, is_flush_extension_attributes_enabled=False, flush_mdm_queue=None, local_vars_configuration=None): # noqa: E501 """Reenrollment - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._is_flush_policy_history_enabled = None self._is_flush_location_information_enabled = None self._is_flush_location_information_history_enabled = None self._is_flush_extension_attributes_enabled = None self._flush_mdm_queue = None self.discriminator = None if is_flush_policy_history_enabled is not None: self.is_flush_policy_history_enabled = is_flush_policy_history_enabled if is_flush_location_information_enabled is not None: self.is_flush_location_information_enabled = is_flush_location_information_enabled if is_flush_location_information_history_enabled is not None: self.is_flush_location_information_history_enabled = is_flush_location_information_history_enabled if is_flush_extension_attributes_enabled is not None: self.is_flush_extension_attributes_enabled = is_flush_extension_attributes_enabled self.flush_mdm_queue = flush_mdm_queue @property def is_flush_policy_history_enabled(self): """Gets the is_flush_policy_history_enabled of this Reenrollment. # noqa: E501 :return: The is_flush_policy_history_enabled of this Reenrollment. # noqa: E501 :rtype: bool """ return self._is_flush_policy_history_enabled @is_flush_policy_history_enabled.setter def is_flush_policy_history_enabled(self, is_flush_policy_history_enabled): """Sets the is_flush_policy_history_enabled of this Reenrollment. :param is_flush_policy_history_enabled: The is_flush_policy_history_enabled of this Reenrollment. # noqa: E501 :type is_flush_policy_history_enabled: bool """ self._is_flush_policy_history_enabled = is_flush_policy_history_enabled @property def is_flush_location_information_enabled(self): """Gets the is_flush_location_information_enabled of this Reenrollment. # noqa: E501 :return: The is_flush_location_information_enabled of this Reenrollment. # noqa: E501 :rtype: bool """ return self._is_flush_location_information_enabled @is_flush_location_information_enabled.setter def is_flush_location_information_enabled(self, is_flush_location_information_enabled): """Sets the is_flush_location_information_enabled of this Reenrollment. :param is_flush_location_information_enabled: The is_flush_location_information_enabled of this Reenrollment. # noqa: E501 :type is_flush_location_information_enabled: bool """ self._is_flush_location_information_enabled = is_flush_location_information_enabled @property def is_flush_location_information_history_enabled(self): """Gets the is_flush_location_information_history_enabled of this Reenrollment. 
# noqa: E501 :return: The is_flush_location_information_history_enabled of this Reenrollment. # noqa: E501 :rtype: bool """ return self._is_flush_location_information_history_enabled @is_flush_location_information_history_enabled.setter def is_flush_location_information_history_enabled(self, is_flush_location_information_history_enabled): """Sets the is_flush_location_information_history_enabled of this Reenrollment. :param is_flush_location_information_history_enabled: The is_flush_location_information_history_enabled of this Reenrollment. # noqa: E501 :type is_flush_location_information_history_enabled: bool """ self._is_flush_location_information_history_enabled = is_flush_location_information_history_enabled @property def is_flush_extension_attributes_enabled(self): """Gets the is_flush_extension_attributes_enabled of this Reenrollment. # noqa: E501 :return: The is_flush_extension_attributes_enabled of this Reenrollment. # noqa: E501 :rtype: bool """ return self._is_flush_extension_attributes_enabled @is_flush_extension_attributes_enabled.setter def is_flush_extension_attributes_enabled(self, is_flush_extension_attributes_enabled): """Sets the is_flush_extension_attributes_enabled of this Reenrollment. :param is_flush_extension_attributes_enabled: The is_flush_extension_attributes_enabled of this Reenrollment. # noqa: E501 :type is_flush_extension_attributes_enabled: bool """ self._is_flush_extension_attributes_enabled = is_flush_extension_attributes_enabled @property def flush_mdm_queue(self): """Gets the flush_mdm_queue of this Reenrollment. # noqa: E501 :return: The flush_mdm_queue of this Reenrollment. # noqa: E501 :rtype: str """ return self._flush_mdm_queue @flush_mdm_queue.setter def flush_mdm_queue(self, flush_mdm_queue): """Sets the flush_mdm_queue of this Reenrollment. :param flush_mdm_queue: The flush_mdm_queue of this Reenrollment. 
# noqa: E501 :type flush_mdm_queue: str """ if self.local_vars_configuration.client_side_validation and flush_mdm_queue is None: # noqa: E501 raise ValueError("Invalid value for `flush_mdm_queue`, must not be `None`") # noqa: E501 allowed_values = ["DELETE_NOTHING", "DELETE_ERRORS", "DELETE_EVERYTHING_EXCEPT_ACKNOWLEDGED", "DELETE_EVERYTHING"] # noqa: E501 if self.local_vars_configuration.client_side_validation and flush_mdm_queue not in allowed_values: # noqa: E501 raise ValueError( "Invalid value for `flush_mdm_queue` ({0}), must be one of {1}" # noqa: E501 .format(flush_mdm_queue, allowed_values) ) self._flush_mdm_queue = flush_mdm_queue def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Reenrollment): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, Reenrollment): return True return self.to_dict() != other.to_dict() ``` #### File: jamf/models/user_attributes.py ```python import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class UserAttributes(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'full_name': 'str', 'email_address': 'str', 'phone_number': 'str', 'position': 'str', 'room': 'str', 'building_id': 'str', 'department_id': 'str' } attribute_map = { 'full_name': 'fullName', 'email_address': 'emailAddress', 'phone_number': 'phoneNumber', 'position': 'position', 'room': 'room', 'building_id': 'buildingId', 'department_id': 'departmentId' } def __init__(self, full_name=None, email_address=None, phone_number=None, position=None, room=None, building_id=None, department_id=None, local_vars_configuration=None): # noqa: E501 """UserAttributes - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._full_name = None self._email_address = None self._phone_number = None self._position = None self._room = None self._building_id = None self._department_id = None self.discriminator = None if full_name is not None: self.full_name = full_name if email_address is not None: self.email_address = email_address if phone_number is not None: self.phone_number = phone_number if position is not None: self.position = position if room is not None: self.room = room if building_id is not None: self.building_id = building_id if department_id is not None: self.department_id = department_id @property def full_name(self): """Gets the full_name of this UserAttributes. # noqa: E501 :return: The full_name of this UserAttributes. # noqa: E501 :rtype: str """ return self._full_name @full_name.setter def full_name(self, full_name): """Sets the full_name of this UserAttributes. :param full_name: The full_name of this UserAttributes. # noqa: E501 :type full_name: str """ self._full_name = full_name @property def email_address(self): """Gets the email_address of this UserAttributes. # noqa: E501 :return: The email_address of this UserAttributes. # noqa: E501 :rtype: str """ return self._email_address @email_address.setter def email_address(self, email_address): """Sets the email_address of this UserAttributes. :param email_address: The email_address of this UserAttributes. # noqa: E501 :type email_address: str """ self._email_address = email_address @property def phone_number(self): """Gets the phone_number of this UserAttributes. # noqa: E501 :return: The phone_number of this UserAttributes. # noqa: E501 :rtype: str """ return self._phone_number @phone_number.setter def phone_number(self, phone_number): """Sets the phone_number of this UserAttributes. :param phone_number: The phone_number of this UserAttributes. # noqa: E501 :type phone_number: str """ self._phone_number = phone_number @property def position(self): """Gets the position of this UserAttributes. # noqa: E501 :return: The position of this UserAttributes. # noqa: E501 :rtype: str """ return self._position @position.setter def position(self, position): """Sets the position of this UserAttributes. :param position: The position of this UserAttributes. # noqa: E501 :type position: str """ self._position = position @property def room(self): """Gets the room of this UserAttributes. # noqa: E501 :return: The room of this UserAttributes. # noqa: E501 :rtype: str """ return self._room @room.setter def room(self, room): """Sets the room of this UserAttributes. :param room: The room of this UserAttributes. # noqa: E501 :type room: str """ self._room = room @property def building_id(self): """Gets the building_id of this UserAttributes. # noqa: E501 :return: The building_id of this UserAttributes. 
# noqa: E501 :rtype: str """ return self._building_id @building_id.setter def building_id(self, building_id): """Sets the building_id of this UserAttributes. :param building_id: The building_id of this UserAttributes. # noqa: E501 :type building_id: str """ self._building_id = building_id @property def department_id(self): """Gets the department_id of this UserAttributes. # noqa: E501 :return: The department_id of this UserAttributes. # noqa: E501 :rtype: str """ return self._department_id @department_id.setter def department_id(self, department_id): """Sets the department_id of this UserAttributes. :param department_id: The department_id of this UserAttributes. # noqa: E501 :type department_id: str """ self._department_id = department_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, UserAttributes): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, UserAttributes): return True return self.to_dict() != other.to_dict() ``` #### File: python-jamf/test/test_cloud_ldap_connection_pool_statistics.py ```python from __future__ import absolute_import import unittest import datetime import jamf from jamf.models.cloud_ldap_connection_pool_statistics import CloudLdapConnectionPoolStatistics # noqa: E501 from jamf.rest import ApiException class TestCloudLdapConnectionPoolStatistics(unittest.TestCase): """CloudLdapConnectionPoolStatistics unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test CloudLdapConnectionPoolStatistics include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = jamf.models.cloud_ldap_connection_pool_statistics.CloudLdapConnectionPoolStatistics() # noqa: E501 if include_optional : return CloudLdapConnectionPoolStatistics( num_connections_closed_defunct = 1, num_connections_closed_expired = 1, num_connections_closed_unneeded = 1, num_failed_checkouts = 1, num_failed_connection_attempts = 1, num_released_valid = 1, num_successful_checkouts = 1, num_successful_checkouts_new_connection = 1, num_successful_connection_attempts = 1, maximum_available_connections = 1, num_successful_checkouts_without_wait = 1, num_successful_checkouts_after_wait = 1, num_available_connections = 1 ) else : return CloudLdapConnectionPoolStatistics( ) def testCloudLdapConnectionPoolStatistics(self): """Test CloudLdapConnectionPoolStatistics""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_computer_content_caching.py ```python from __future__ import 
absolute_import import unittest import datetime import jamf from jamf.models.computer_content_caching import ComputerContentCaching # noqa: E501 from jamf.rest import ApiException class TestComputerContentCaching(unittest.TestCase): """ComputerContentCaching unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test ComputerContentCaching include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = jamf.models.computer_content_caching.ComputerContentCaching() # noqa: E501 if include_optional : return ComputerContentCaching( computer_content_caching_information_id = '1', parents = [ jamf.models.computer_content_caching_parent.ComputerContentCachingParent( content_caching_parent_id = '1', address = 'SomeAddress', alerts = jamf.models.computer_content_caching_parent_alert.ComputerContentCachingParentAlert( content_caching_parent_alert_id = '1', addresses = [], class_name = 'SomeClass', post_date = '2018-10-31T18:04:13Z', ), details = jamf.models.computer_content_caching_parent_details.ComputerContentCachingParentDetails( content_caching_parent_details_id = '1', ac_power = True, cache_size_bytes = 0, capabilities = jamf.models.computer_content_caching_parent_capabilities.ComputerContentCachingParentCapabilities( content_caching_parent_capabilities_id = '1', imports = True, namespaces = True, personal_content = True, query_parameters = True, shared_content = True, prioritization = True, ), portable = True, local_network = [ jamf.models.computer_content_caching_parent_local_network.ComputerContentCachingParentLocalNetwork( content_caching_parent_local_network_id = '1', speed = 5000, wired = True, ) ], ), guid = 'CD1E1291-4AF9-4468-B5D5-0F780C13DB2F', healthy = True, port = 0, version = '1', ) ], alerts = [ jamf.models.computer_content_caching_alert.ComputerContentCachingAlert( cache_bytes_limit = 0, class_name = 'SomeClass', path_preventing_access = '/some/path', post_date = '2018-10-31T18:04:13Z', reserved_volume_bytes = 0, resource = 'SomeResource', ) ], activated = False, active = False, actual_cache_bytes_used = 0, cache_details = [ jamf.models.computer_content_caching_cache_detail.ComputerContentCachingCacheDetail( computer_content_caching_cache_details_id = '1', category_name = 'SomeCategory', disk_space_bytes_used = 0, ) ], cache_bytes_free = 23353884672, cache_bytes_limit = 0, cache_status = 'OK', cache_bytes_used = 0, data_migration_completed = False, data_migration_progress_percentage = 0, data_migration_error = jamf.models.computer_content_caching_data_migration_error.ComputerContentCachingDataMigrationError( code = 0, domain = 'SomeDomain', user_info = [ jamf.models.computer_content_caching_data_migration_error_user_info.ComputerContentCachingDataMigrationErrorUserInfo( key = 'foo', value = 'bar', ) ], ), max_cache_pressure_last1_hour_percentage = 0, personal_cache_bytes_free = 23353884672, personal_cache_bytes_limit = 0, personal_cache_bytes_used = 0, port = 0, public_address = 'SomeAddress', registration_error = 'NOT_ACTIVATED', registration_response_code = 403, registration_started = '2018-10-31T18:04:13Z', registration_status = 'CONTENT_CACHING_FAILED', restricted_media = False, server_guid = 'CD1E1291-4AF9-4468-B5D5-0F780C13DB2F', startup_status = 'FAILED', tetherator_status = 'CONTENT_CACHING_DISABLED', total_bytes_are_since = '2018-10-31T18:04:13Z', total_bytes_dropped = 0, total_bytes_imported = 0, total_bytes_returned_to_children = 0, 
total_bytes_returned_to_clients = 0, total_bytes_returned_to_peers = 0, total_bytes_stored_from_origin = 0, total_bytes_stored_from_parents = 0, total_bytes_stored_from_peers = 0 ) else : return ComputerContentCaching( ) def testComputerContentCaching(self): """Test ComputerContentCaching""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_computer_inventory_api.py ```python from __future__ import absolute_import import unittest import jamf from jamf.api.computer_inventory_api import ComputerInventoryApi # noqa: E501 from jamf.rest import ApiException class TestComputerInventoryApi(unittest.TestCase): """ComputerInventoryApi unit test stubs""" def setUp(self): self.api = jamf.api.computer_inventory_api.ComputerInventoryApi() # noqa: E501 def tearDown(self): pass def test_v1_computers_inventory_detail_id_get(self): """Test case for v1_computers_inventory_detail_id_get Return a Computer details with all sections # noqa: E501 """ pass def test_v1_computers_inventory_detail_id_patch(self): """Test case for v1_computers_inventory_detail_id_patch Return a updated computer instance # noqa: E501 """ pass def test_v1_computers_inventory_get(self): """Test case for v1_computers_inventory_get Return a Computer Inventory for paginated list of computers # noqa: E501 """ pass def test_v1_computers_inventory_id_attachments_attachment_id_delete(self): """Test case for v1_computers_inventory_id_attachments_attachment_id_delete Remove attachment # noqa: E501 """ pass def test_v1_computers_inventory_id_attachments_attachment_id_get(self): """Test case for v1_computers_inventory_id_attachments_attachment_id_get Download attachment file # noqa: E501 """ pass def test_v1_computers_inventory_id_attachments_post(self): """Test case for v1_computers_inventory_id_attachments_post Upload attachment and assign to computer # noqa: E501 """ pass def test_v1_computers_inventory_id_delete(self): """Test case for v1_computers_inventory_id_delete Remove specified Computer record # noqa: E501 """ pass def test_v1_computers_inventory_id_get(self): """Test case for v1_computers_inventory_id_get Return a Computer General details # noqa: E501 """ pass if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_computer_inventory_update_request.py ```python from __future__ import absolute_import import unittest import datetime import jamf from jamf.models.computer_inventory_update_request import ComputerInventoryUpdateRequest # noqa: E501 from jamf.rest import ApiException class TestComputerInventoryUpdateRequest(unittest.TestCase): """ComputerInventoryUpdateRequest unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test ComputerInventoryUpdateRequest include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = jamf.models.computer_inventory_update_request.ComputerInventoryUpdateRequest() # noqa: E501 if include_optional : return ComputerInventoryUpdateRequest( udid = '45436edf-864e-4364-982a-330b01d39e65', general = jamf.models.computer_general_update.ComputerGeneralUpdate( name = 'Boalime', last_ip_address = '247.185.82.186', barcode1 = '5 12345 678900', barcode2 = '5 12345 678900', asset_tag = '304822', extension_attributes = [ jamf.models.computer_extension_attribute.ComputerExtensionAttribute( definition_id 
= '23', name = 'Some Attribute', description = 'Some Attribute defines how much Foo impacts Bar.', enabled = True, multi_value = True, values = ["foo","bar"], data_type = 'STRING', options = ["foo","bar"], input_type = 'TEXT', ) ], ), purchasing = jamf.models.computer_purchase.ComputerPurchase( leased = True, purchased = True, po_number = '53-1', po_date = 'Tue Jan 01 00:00:00 GMT 2019', vendor = 'Example Vendor', warranty_date = 'Tue Jan 01 00:00:00 GMT 2019', apple_care_id = 'abcd', lease_date = 'Tue Jan 01 00:00:00 GMT 2019', purchase_price = '$500', life_expectancy = 5, purchasing_account = 'admin', purchasing_contact = 'true', extension_attributes = [ jamf.models.computer_extension_attribute.ComputerExtensionAttribute( definition_id = '23', name = 'Some Attribute', description = 'Some Attribute defines how much Foo impacts Bar.', enabled = True, multi_value = True, values = ["foo","bar"], data_type = 'STRING', options = ["foo","bar"], input_type = 'TEXT', ) ], ), user_and_location = jamf.models.computer_user_and_location.ComputerUserAndLocation( username = '<NAME>', realname = '<NAME>', email = '<EMAIL>', position = 'IT Team Lead', phone = '123-456-789', department_id = '1', building_id = '1', room = '5', extension_attributes = [ jamf.models.computer_extension_attribute.ComputerExtensionAttribute( definition_id = '23', name = 'Some Attribute', description = 'Some Attribute defines how much Foo impacts Bar.', enabled = True, multi_value = True, values = ["foo","bar"], data_type = 'STRING', options = ["foo","bar"], input_type = 'TEXT', ) ], ), hardware = jamf.models.computer_hardware_update.ComputerHardwareUpdate( network_adapter_type = 'Foo', mac_address = '6A:2C:4B:B7:65:B5', alt_network_adapter_type = 'Bar', alt_mac_address = '82:45:58:44:dc:01', extension_attributes = [ jamf.models.computer_extension_attribute.ComputerExtensionAttribute( definition_id = '23', name = 'Some Attribute', description = 'Some Attribute defines how much Foo impacts Bar.', enabled = True, multi_value = True, values = ["foo","bar"], data_type = 'STRING', options = ["foo","bar"], input_type = 'TEXT', ) ], ), operating_system = jamf.models.computer_operating_system_update.ComputerOperatingSystemUpdate( extension_attributes = [ jamf.models.computer_extension_attribute.ComputerExtensionAttribute( definition_id = '23', name = 'Some Attribute', description = 'Some Attribute defines how much Foo impacts Bar.', enabled = True, multi_value = True, values = ["foo","bar"], data_type = 'STRING', options = ["foo","bar"], input_type = 'TEXT', ) ], ), extension_attributes = [ jamf.models.computer_extension_attribute.ComputerExtensionAttribute( definition_id = '23', name = 'Some Attribute', description = 'Some Attribute defines how much Foo impacts Bar.', enabled = True, multi_value = True, values = ["foo","bar"], data_type = 'STRING', options = ["foo","bar"], input_type = 'TEXT', ) ] ) else : return ComputerInventoryUpdateRequest( ) def testComputerInventoryUpdateRequest(self): """Test ComputerInventoryUpdateRequest""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_computer_local_user_account.py ```python from __future__ import absolute_import import unittest import datetime import jamf from jamf.models.computer_local_user_account import ComputerLocalUserAccount # noqa: E501 from jamf.rest import ApiException class TestComputerLocalUserAccount(unittest.TestCase): 
"""ComputerLocalUserAccount unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test ComputerLocalUserAccount include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = jamf.models.computer_local_user_account.ComputerLocalUserAccount() # noqa: E501 if include_optional : return ComputerLocalUserAccount( uid = '501', username = 'jamf', full_name = '<NAME>', admin = True, home_directory = '/Users/jamf', home_directory_size_mb = 131072, file_vault2_enabled = True, user_account_type = 'LOCAL', password_min_length = 4, password_max_age = 5, password_min_complex_characters = 5, password_history_depth = 5, password_require_alphanumeric = True, computer_azure_active_directory_id = '1', user_azure_active_directory_id = '1', azure_active_directory_id = 'ACTIVATED' ) else : return ComputerLocalUserAccount( ) def testComputerLocalUserAccount(self): """Test ComputerLocalUserAccount""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_enrollment_settings_v2.py ```python from __future__ import absolute_import import unittest import datetime import jamf from jamf.models.enrollment_settings_v2 import EnrollmentSettingsV2 # noqa: E501 from jamf.rest import ApiException class TestEnrollmentSettingsV2(unittest.TestCase): """EnrollmentSettingsV2 unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test EnrollmentSettingsV2 include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = jamf.models.enrollment_settings_v2.EnrollmentSettingsV2() # noqa: E501 if include_optional : return EnrollmentSettingsV2( install_single_profile = True, signing_mdm_profile_enabled = True, mdm_signing_certificate = jamf.models.certificate_identity_v2.CertificateIdentityV2( filename = '0', keystore_password = '0', identity_keystore = <KEY>, md5_sum = '0', ), restrict_reenrollment = True, flush_location_information = True, flush_location_history_information = True, flush_policy_history = True, flush_extension_attributes = True, flush_mdm_commands_on_reenroll = 'DELETE_EVERYTHING_EXCEPT_ACKNOWLEDGED', mac_os_enterprise_enrollment_enabled = True, management_username = '0', management_password = '0', management_password_set = True, password_type = '<PASSWORD>', random_password_length = 56, create_management_account = True, hide_management_account = True, allow_ssh_only_management_account = True, ensure_ssh_running = True, launch_self_service = True, sign_quick_add = True, developer_certificate_identity = jamf.models.certificate_identity_v2.CertificateIdentityV2( filename = '0', keystore_password = '0', identity_keystore = <KEY>, md5_sum = '0', ), developer_certificate_identity_details = jamf.models.certificate_details.CertificateDetails( subject = '0', serial_number = '0', ), mdm_signing_certificate_details = jamf.models.certificate_details.CertificateDetails( subject = '0', serial_number = '0', ), ios_enterprise_enrollment_enabled = True, ios_personal_enrollment_enabled = True, personal_device_enrollment_type = 'PERSONALDEVICEPROFILES' ) else : return EnrollmentSettingsV2( ) def testEnrollmentSettingsV2(self): """Test EnrollmentSettingsV2""" inst_req_only = self.make_instance(include_optional=False) 
inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_patch_policies_preview_api.py ```python from __future__ import absolute_import import unittest import jamf from jamf.api.patch_policies_preview_api import PatchPoliciesPreviewApi # noqa: E501 from jamf.rest import ApiException class TestPatchPoliciesPreviewApi(unittest.TestCase): """PatchPoliciesPreviewApi unit test stubs""" def setUp(self): self.api = jamf.api.patch_policies_preview_api.PatchPoliciesPreviewApi() # noqa: E501 def tearDown(self): pass def test_patch_patch_policies_get(self): """Test case for patch_patch_policies_get Return a list of patch policies # noqa: E501 """ pass def test_patch_patch_policies_id_dashboard_delete(self): """Test case for patch_patch_policies_id_dashboard_delete Remove a patch policy from the dashboard # noqa: E501 """ pass def test_patch_patch_policies_id_dashboard_get(self): """Test case for patch_patch_policies_id_dashboard_get Return whether or not the requested patch policy is on the dashboard # noqa: E501 """ pass def test_patch_patch_policies_id_dashboard_post(self): """Test case for patch_patch_policies_id_dashboard_post Add a patch policy to the dashboard # noqa: E501 """ pass if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_smart_user_groups_preview_api.py ```python from __future__ import absolute_import import unittest import jamf from jamf.api.smart_user_groups_preview_api import SmartUserGroupsPreviewApi # noqa: E501 from jamf.rest import ApiException class TestSmartUserGroupsPreviewApi(unittest.TestCase): """SmartUserGroupsPreviewApi unit test stubs""" def setUp(self): self.api = jamf.api.smart_user_groups_preview_api.SmartUserGroupsPreviewApi() # noqa: E501 def tearDown(self): pass def test_v1_smart_user_groups_id_recalculate_post(self): """Test case for v1_smart_user_groups_id_recalculate_post Recalculate the smart group for the given id and then return the ids for the users in the smart group # noqa: E501 """ pass def test_v1_users_id_recalculate_smart_groups_post(self): """Test case for v1_users_id_recalculate_smart_groups_post Recalculate a smart group for the given user id and then return the count of smart groups the user falls into # noqa: E501 """ pass if __name__ == '__main__': unittest.main() ``` #### File: python-jamf/test/test_tomcat_settings_preview_api.py ```python from __future__ import absolute_import import unittest import jamf from jamf.api.tomcat_settings_preview_api import TomcatSettingsPreviewApi # noqa: E501 from jamf.rest import ApiException class TestTomcatSettingsPreviewApi(unittest.TestCase): """TomcatSettingsPreviewApi unit test stubs""" def setUp(self): self.api = jamf.api.tomcat_settings_preview_api.TomcatSettingsPreviewApi() # noqa: E501 def tearDown(self): pass def test_settings_issue_tomcat_ssl_certificate_post(self): """Test case for settings_issue_tomcat_ssl_certificate_post Generate a SSL Certificate using Jamf Certificate Authority # noqa: E501 """ pass if __name__ == '__main__': unittest.main() ```
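The jamf model classes above all follow the same OpenAPI-generated pattern: `openapi_types` and `attribute_map` describe the schema, property setters enforce client-side validation, and `to_dict()` recursively serialises the object. The snippet below is an illustrative sketch, not part of the generated client, that exercises that pattern with the `OrderBy` model; it assumes the `jamf` package from this repository is importable and that the default `Configuration` enables client-side validation.
```python
# Illustrative sketch of the generated-model pattern shown above.
# Assumptions: `jamf` is importable and the default Configuration has
# client_side_validation enabled (otherwise the ValueError is not raised).
from jamf.models.order_by import OrderBy

order = OrderBy(field="username", direction="ASC")
print(order.to_dict())  # {'field': 'username', 'direction': 'ASC'}

try:
    order.direction = "SIDEWAYS"   # not in allowed_values ["ASC", "DESC"]
except ValueError as exc:
    print(exc)  # setter rejects values outside the allowed enum
```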
{ "source": "JensenChik/chinfe", "score": 2 }
#### File: JensenChik/chinfe/server.py ```python from flask import Flask, send_file import os.path app = Flask(__name__) @app.route('/') def root(): return send_file('index.html') @app.route('/<path:path>') def static_proxy(path): return send_file(path) if __name__ == '__main__': app.run() ```
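`static_proxy` above hands whatever path the client requested straight to `send_file`, so any file the process can read is reachable, for example via `../` segments. A common hardening, sketched below rather than taken from the original repository, is Flask's `send_from_directory`, which resolves the path inside a fixed base directory and responds 404 on traversal attempts; `STATIC_ROOT = '.'` is an assumption about where the assets live.
```python
# Hardened variant (illustrative, not from the original repository):
# send_from_directory confines lookups to STATIC_ROOT and raises NotFound
# on path-traversal attempts, unlike a bare send_file(path).
from flask import Flask, send_from_directory

app = Flask(__name__)
STATIC_ROOT = '.'  # assumption: index.html and assets sit next to server.py

@app.route('/')
def root():
    return send_from_directory(STATIC_ROOT, 'index.html')

@app.route('/<path:path>')
def static_proxy(path):
    return send_from_directory(STATIC_ROOT, path)

if __name__ == '__main__':
    app.run()
```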
{ "source": "jensen-gao/x2t", "score": 3 }
#### File: x2t/gaze_capture/face_processor.py ```python import dlib import cv2 import math import numpy as np from gaze_capture.ITrackerData import loadMetadata class FaceProcessor: """ Processes webcam images, returns features used as input to iTracker model """ def __init__(self, predictor_path): self.face_detector = dlib.get_frontal_face_detector() self.predictor = dlib.shape_predictor(predictor_path) self.img_dim = 224 self.face_grid_dim = 25 self.left_eye_points = [42, 43, 44, 45, 46, 47] self.right_eye_points = [36, 37, 38, 39, 40, 41] # means from iTracker dataset, used to normalize inputs to iTracker self.face_mean = loadMetadata('gaze_capture/mean_face_224.mat', silent=True)['image_mean'] self.left_eye_mean = loadMetadata('gaze_capture/mean_left_224.mat', silent=True)['image_mean'] self.right_eye_mean = loadMetadata('gaze_capture/mean_right_224.mat', silent=True)['image_mean'] def get_gaze_features(self, frame): """ Takes webcam image, returns cropped face, eyes, and binary mask for where face appears in image """ height, width = frame.shape[:2] diff = height - width # crop image to square because binary mask is square if diff > 0: frame = frame[math.floor(diff / 2): -math.ceil(diff / 2)] elif diff < 0: frame = frame[:, -math.floor(diff / 2): math.ceil(diff / 2)] gs_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) face_box = self._get_facial_detections(gs_frame) # if no face detections, returns None if face_box is None: return None face = self._get_face(frame, face_box) if face is None: return None face = (face - self.face_mean) / 255 face_grid = self._get_face_grid(frame, face_box) landmarks = self.predictor(gs_frame, face_box) og_left_eye = self._get_eye(frame, landmarks, self.left_eye_points) og_right_eye = self._get_eye(frame, landmarks, self.right_eye_points) left_eye = (og_left_eye - self.left_eye_mean) / 255 right_eye = (og_right_eye - self.right_eye_mean) / 255 face = np.moveaxis(face, -1, 0) left_eye = np.moveaxis(left_eye, -1, 0) right_eye = np.moveaxis(right_eye, -1, 0) return face, left_eye, right_eye, face_grid def _get_face(self, frame, face_box): """ Takes image, bounding box of face, returns cropped + resized face image in correct format for iTracker """ try: face = frame[face_box.top(): face_box.bottom(), face_box.left(): face_box.right()] face = cv2.resize(face, (self.img_dim, self.img_dim)) face = np.flip(face, axis=2) except: return None return face def _get_face_grid(self, frame, face_box): """ Takes image, bounding box of face, returns binary mask for where face appears in image """ frame_dim = len(frame) top = math.floor(face_box.top() * self.face_grid_dim / frame_dim) bottom = math.ceil(face_box.bottom() * self.face_grid_dim / frame_dim) left = math.floor(face_box.left() * self.face_grid_dim / frame_dim) right = math.ceil(face_box.right() * self.face_grid_dim / frame_dim) face_grid = np.zeros((self.face_grid_dim, self.face_grid_dim)) face_grid[top: bottom, left: right] = 1 return face_grid def _get_eye(self, frame, landmarks, points): """ Takes image, detected landmark locations, landmark indices/points associated with an eye, returns cropped eye image """ eye_landmarks = self._get_landmarks(landmarks, points) left, top, width, height = cv2.boundingRect(eye_landmarks) w_margin = int(width / 3) h_margin = (width + 2 * w_margin - height) / 2 top_margin = math.ceil(h_margin) bot_margin = math.floor(h_margin) eye = frame[top - top_margin: top + height + bot_margin, left - w_margin: left + width + w_margin] eye = cv2.resize(eye, (self.img_dim, self.img_dim)) eye 
= np.flip(eye, axis=2) return eye def _get_facial_detections(self, gs_frame): """ Returns first face detected by facial detector in greyscale image """ detections = self.face_detector(gs_frame) if len(detections) == 0: return None return detections[0] @staticmethod def _get_landmarks(landmarks, points): """ Takes landmark locations, landmark indices/points, returns locations of those indices/points """ return np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in points]) ``` #### File: gym_bci_typing/envs/radial_typing_env.py ```python import numpy as np import gym import random from gym import spaces from gym.utils import EzPickle from abc import ABC class RadialTypingEnv(gym.Env, EzPickle, ABC): """ Typing environment based on input sequences. For use with the gaze domain. :param user: (User) User in the environment. :param width: Width of the typing environment. Overwritten if user is a Pygame user. :param height: Height of the typing environment. Overwritten if user is a Pygame user. """ def __init__(self, user, n_actions=8, width=1920, height=1080, sentence_length=8): EzPickle.__init__(self) self.user = user self.has_goal = hasattr(self.user, 'goal') self.sentence_length = sentence_length if hasattr(self.user, 'width') and hasattr(self.user, 'height'): self.width = self.user.width self.height = self.user.height else: self.width = width self.height = height self.action_space = spaces.Discrete(n_actions) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.user.input_dim) self.radius = min(self.width, self.height) / 3 angles = [(2 * np.pi * k / n_actions) for k in range(n_actions)] self.action_coords = np.array([self.radius * np.array([np.cos(angle), np.sin(angle)]) for angle in angles]) self.targets = self.action_coords / self.radius self.n_steps = None self.typed = None self.curr_actions = None self.success = None self.can_undo = None self.no_lm = True def reset(self): self.n_steps = 0 self.typed = [] self.user.reset() self.curr_actions = self._get_actions() self.success = [] self.can_undo = False def step(self, action, error=False): """ Selects a word, always types it. """ if self.has_goal: assert len(self.typed) < len(self.user.goal) else: assert len(self.typed) < self.sentence_length info = {'is_success': False} if not np.isscalar(action): action = action.item() selected = self.curr_actions[action] target = self.user.get_next_action_index() correct = int(action == target) rew = correct if not error else 1 - correct info['target'] = target info['correct'] = correct self.success.append(action == target) self.typed.append(selected) if self.has_goal: done = len(self.typed) == len(self.user.goal) else: done = len(self.typed) == self.sentence_length if done: info['is_success'] = all(self.success) else: self.curr_actions = self._get_actions() self.n_steps += 1 info['n_steps'] = self.n_steps self.can_undo = True return None, rew, done, info def undo(self): """ If possible, undoes the last typed word. """ assert self.can_undo self.can_undo = False self.typed = self.typed[:-1] self.success = self.success[:-1] self.curr_actions = self._get_actions() def render(self, mode='human'): pass def get_input(self): """ Gets an input from the user. """ return self.user.get_input() def _get_actions(self): """ Randomizes words in the radial layout, one of which is the correct next word. 
""" if hasattr(self.user, 'vocab'): actions = random.sample(self.user.vocab, self.action_space.n) else: actions = [''] * self.action_space.n if self.has_goal: next_word = self.user.goal[len(self.typed)] if next_word not in actions: actions[-1] = next_word random.shuffle(actions) return actions def center_coord(self, coord): """ Converts Pygame coordinate to a centered coordinate in the typing environment. """ return np.array([coord[0] - self.width / 2, self.height / 2 - coord[1]]) def uncenter_coord(self, coord): """ Converts centered coordinate in the typing environment to a Pygame coordinate. """ return np.array([coord[0] + self.width / 2, self.height / 2 - coord[1]]) ```
{ "source": "jensengrey/aerospike-client-python", "score": 2 }
#### File: test/new_tests/test_touch.py ```python import pytest import sys from .test_base_class import TestBaseClass from aerospike import exception as e from .as_status_codes import AerospikeStatus aerospike = pytest.importorskip("aerospike") try: import aerospike except: print("Please install aerospike python client.") sys.exit(1) class TestTouch(object): @pytest.fixture(autouse=True) def setup(self, request, as_connection): self.test_demo_1_digest = aerospike.calc_digest( 'test', 'demo', 1) self.added_keys = [] for i in range(5): key = ('test', 'demo', i) rec = {'name': 'name%s' % (str(i)), 'age': i} as_connection.put(key, rec) self.added_keys.append(key) def teardown(): for key in self.added_keys: as_connection.remove(key) self.added_keys = [] request.addfinalizer(teardown) def test_touch_with_no_parameters(self): """ Invoke touch() without any mandatory parameters. """ with pytest.raises(TypeError) as typeError: self.as_connection.touch() assert "Required argument 'key' (pos 1) not found" in str( typeError.value) def test_touch_with_correct_paramters(self): """ Invoke touch() with correct parameters """ key = ('test', 'demo', 1) response = self.as_connection.touch(key, 120) assert response == AerospikeStatus.AEROSPIKE_OK def test_touch_with_correct_policy(self): """ Invoke touch() with correct policy """ key = ('test', 'demo', 1) policy = {'timeout': 1000, 'retry': aerospike.POLICY_RETRY_ONCE} response = self.as_connection.touch(key, 120, {}, policy) assert response == AerospikeStatus.AEROSPIKE_OK def test_touch_with_policy_key_send(self): """ Invoke touch() with policy key send """ key = ('test', 'demo', 1) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND, 'retry': aerospike.POLICY_RETRY_ONCE, 'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER } self.as_connection.touch(key, 120, {}, policy) (key, _, bins) = self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1'} assert key == ('test', 'demo', None, self.test_demo_1_digest) def test_touch_with_policy_key_digest(self): """ Invoke touch() with policy key digest """ key = ('test', 'demo', None, bytearray("asd;as[d'as;djk;uyfl", "utf-8")) rec = {'name': 'name%s' % (str(1)), 'age': 1, 'nolist': [1, 2, 3]} self.as_connection.put(key, rec) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_DIGEST, 'retry': aerospike.POLICY_RETRY_NONE } self.as_connection.touch(key, 120, {}, policy) (key, _, bins) = self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1', 'nolist': [1, 2, 3]} assert key == ('test', 'demo', None, bytearray(b"asd;as[d\'as;djk;uyfl")) self.as_connection.remove(key) def test_touch_with_policy_key_gen_EQ_ignore(self): """ Invoke touch() with gen eq positive ignore """ key = ('test', 'demo', 1) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND, 'retry': aerospike.POLICY_RETRY_ONCE, 'gen': aerospike.POLICY_GEN_IGNORE } meta = {'gen': 10, 'ttl': 1200} self.as_connection.touch(key, 120, meta, policy) (key, meta, bins) = self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1'} assert key == ('test', 'demo', None, self.test_demo_1_digest) def test_touch_with_policy_key_gen_EQ_positive(self): """ Invoke touch() with gen eq positive """ key = ('test', 'demo', 1) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND, 'retry': aerospike.POLICY_RETRY_ONCE, 'gen': aerospike.POLICY_GEN_EQ } (key, meta) = self.as_connection.exists(key) gen = meta['gen'] meta = {'gen': gen, 'ttl': 1200} self.as_connection.touch(key, 120, meta, policy) (key, meta, bins) = 
self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1'} assert key == ('test', 'demo', None, self.test_demo_1_digest) def test_touch_with_policy_key_gen_EQ_not_equal(self): """ Invoke touch() with policy key EQ not equal """ key = ('test', 'demo', 1) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND, 'retry': aerospike.POLICY_RETRY_ONCE, 'gen': aerospike.POLICY_GEN_EQ } meta = { 'gen': 10, 'ttl': 1200 } with pytest.raises(e.RecordGenerationError) as err_info: self.as_connection.touch(key, 120, meta, policy) err_code = err_info.value.code assert err_code == AerospikeStatus.AEROSPIKE_ERR_RECORD_GENERATION (key, meta, bins) = self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1'} assert key == ('test', 'demo', None, self.test_demo_1_digest) def test_touch_with_policy_key_gen_GT_lesser(self): """ Invoke touch() with gen GT lesser """ key = ('test', 'demo', 1) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND, 'retry': aerospike.POLICY_RETRY_ONCE, 'gen': aerospike.POLICY_GEN_GT } (key, meta) = self.as_connection.exists(key) gen = meta['gen'] meta = { 'gen': gen, 'ttl': 1200 } with pytest.raises(e.RecordGenerationError) as err_info: self.as_connection.touch(key, 120, meta, policy) err_code = err_info.value.code assert err_code == AerospikeStatus.AEROSPIKE_ERR_RECORD_GENERATION (key, meta, bins) = self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1'} assert key == ('test', 'demo', None, self.test_demo_1_digest) def test_touch_with_policy_key_gen_GT_positive(self): """ Invoke touch() with gen GT positive """ key = ('test', 'demo', 1) policy = { 'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND, 'retry': aerospike.POLICY_RETRY_ONCE, 'gen': aerospike.POLICY_GEN_GT } (key, meta) = self.as_connection.exists(key) gen = meta['gen'] meta = {'gen': gen + 5, 'ttl': 1200} self.as_connection.touch(key, 120, meta, policy) (key, meta, bins) = self.as_connection.get(key) assert bins == {'age': 1, 'name': 'name1'} assert key == ('test', 'demo', None, self.test_demo_1_digest) def test_touch_with_incorrect_policy(self): """ Invoke touch() with incorrect policy """ key = ('test', 'demo', 1) policy = {'total_timeout': 0.5} with pytest.raises(e.ParamError) as err_info: self.as_connection.touch(key, 120, {}, policy) err_code = err_info.value.code assert err_code == AerospikeStatus.AEROSPIKE_ERR_PARAM def test_touch_with_nonexistent_key(self): """ Invoke touch() with non-existent key """ key = ('test', 'demo', 1000) with pytest.raises(e.RecordNotFound) as err_info: self.as_connection.touch(key, 120) err_code = err_info.value.code assert err_code == AerospikeStatus.AEROSPIKE_ERR_RECORD_NOT_FOUND def test_touch_value_string(self): """ Invoke touch() not a string """ key = ('test', 'demo', 1) with pytest.raises(e.ParamError) as err_info: self.as_connection.touch(key, "name") assert err_info.value.code == AerospikeStatus.AEROSPIKE_ERR_PARAM def test_touch_with_extra_parameter(self): """ Invoke touch() with extra parameter. 
""" key = ('test', 'demo', 1) policy = {'timeout': 1000} with pytest.raises(TypeError) as typeError: self.as_connection.touch(key, 120, {}, policy, "") assert "touch() takes at most 4 arguments (5 given)" in str( typeError.value) def test_touch_policy_is_string(self): """ Invoke touch() with policy is string """ key = ('test', 'demo', 1) with pytest.raises(e.ParamError) as err_info: self.as_connection.touch(key, 120, {}, "") assert err_info.value.code == AerospikeStatus.AEROSPIKE_ERR_PARAM def test_touch_with_correct_paramters_without_connection(self): """ Invoke touch() with correct parameters without connection """ config = {'hosts': [('127.0.0.1', 3000)]} client1 = aerospike.client(config) key = ('test', 'demo', 1) with pytest.raises(e.ClusterError) as err_info: client1.touch(key, 120) err_code = err_info.value.code assert err_code == AerospikeStatus.AEROSPIKE_CLUSTER_ERROR def test_touch_withttlvalue_greaterthan_maxsize(self): """ Invoke touch() with ttl value greater than (2^63-1) """ key = ('test', 'demo', 1) meta = {'gen': 10, 'ttl': 2 ** 64} with pytest.raises(e.ParamError) as err_info: self.as_connection.touch(key, 120, meta, None) err_code = err_info.value.code assert err_code == AerospikeStatus.AEROSPIKE_ERR_PARAM ``` #### File: test/new_tests/test_udf_remove.py ```python from __future__ import print_function import sys from distutils.version import LooseVersion import pytest from .as_status_codes import AerospikeStatus from .udf_helpers import wait_for_udf_removal, wait_for_udf_to_exist from aerospike import exception as e aerospike = pytest.importorskip("aerospike") try: import aerospike except ImportError: print("Please install aerospike python client.") sys.exit(1) def is_greater_451(version_str): ''' Is the server version 4.5.1.0-pre or newer ''' return LooseVersion(version_str) >= LooseVersion("4.5.1") class TestUdfRemove(object): @pytest.fixture(autouse=True) def setup(self, request, as_connection): """ Setup Method, adds a UDF and waits for the UDF to exist """ as_connection.udf_put(self.udf_name, 0, {}) wait_for_udf_to_exist(as_connection, self.udf_name) def teardown(): """ Teardown Method, Checks to see if the UDF is on the server, if it is: removes it and waits for the removal process to complete """ udf_name = TestUdfRemove.udf_name udf_list = as_connection.udf_list({'timeout': 100}) for udf in udf_list: if udf['name'] == udf_name: as_connection.udf_remove(udf_name) wait_for_udf_removal(as_connection, udf_name) break request.addfinalizer(teardown) def setup_class(cls): """ Setup class, sets the name of the example UDF used in the tests """ cls.udf_name = u'example.lua' def test_udf_remove_with_no_policy(self): """ Test to verify a proper call to udf_remove will remove a UDF """ module = "example.lua" status = self.as_connection.udf_remove(module) assert status == AerospikeStatus.AEROSPIKE_OK wait_for_udf_removal(self.as_connection, module) udf_list = self.as_connection.udf_list({'timeout': 100}) present = False for udf in udf_list: if 'example.lua' == udf['name']: present = True assert not present @pytest.mark.xfail(reason="This is the only method which allows" + " invalid timeout") def test_udf_remove_with_invalid_timeout_policy_value(self): """ Verify that an incorrect timeout policy will not prevent UDF removal """ policy = {'timeout': 0.1} module = "example.lua" with pytest.raises(e.ParamError): status = self.as_connection.udf_remove(module, policy) # Wait for the removal to take place wait_for_udf_removal(self.as_connection, module) assert status == 0 def 
test_udf_remove_with_proper_timeout_policy_value(self): """ Verify that udf_remove with a correct timeout policy argument functions. """ policy = {'timeout': 1000} module = "example.lua" status = self.as_connection.udf_remove(module, policy) assert status == AerospikeStatus.AEROSPIKE_OK # Wait for the removal to take place wait_for_udf_removal(self.as_connection, module) udf_list = self.as_connection.udf_list({'timeout': 0}) present = False for udf in udf_list: if 'example.lua' == udf['name']: present = True assert not present def test_udf_remove_with_unicode_filename(self): """ Test to ensure that unicode filenames may be used to remove UDFs """ policy = {'timeout': 100} module = u"example.lua" status = self.as_connection.udf_remove(module, policy) assert status == AerospikeStatus.AEROSPIKE_OK # Wait for the removal to take place wait_for_udf_removal(self.as_connection, module) udf_list = self.as_connection.udf_list({'timeout': 100}) present = False for udf in udf_list: if 'example.lua' == udf['name']: present = True assert not present @pytest.mark.usefixtures("connection_with_udf") class TestIncorrectCallsToUDFRemove(object): """ These are all tests where udf_remove fails for various reasons, So we skip removing and re-adding the UDF before and after each test """ def setup_class(cls): """ setup the class attribute indicating the udf to load """ cls.udf_to_load = "example.lua" def test_udf_remove_with_proper_parameters_without_connection(self): """ Test to verify that attempting to remove a UDF before connection raises an error """ config = {'hosts': [('127.0.0.1', 3000)]} client1 = aerospike.client(config) policy = {'timeout': 100} module = "example.lua" with pytest.raises(e.ClusterError) as err_info: client1.udf_remove(module, policy) assert err_info.value.code == AerospikeStatus.AEROSPIKE_CLUSTER_ERROR def test_udf_remove_with_non_existent_module(self): """ Test to ensure that the removal of a non existant UDF raises an Error """ policy = {} module = "some_fake_module_that_does_not_exist" if is_greater_451(self.string_server_version): self.as_connection.udf_remove(module, policy) else: with pytest.raises(e.UDFError) as err_info: self.as_connection.udf_remove(module, policy) def test_udf_remove_without_parameters(self): """ Test to verify that udf_remove raises an error with no parameters """ with pytest.raises(TypeError) as typeError: self.as_connection.udf_remove() assert "Required argument 'filename' (pos 1) not found" in str( typeError.value) def test_udf_remove_with_none_as_parameters(self): """ Test to verify that udf_remove raises and error when None is used as parameters """ with pytest.raises(e.ParamError) as err_info: self.as_connection.udf_remove(None, None) assert err_info.value.code == AerospikeStatus.AEROSPIKE_ERR_PARAM @pytest.mark.parametrize('udf', [None, False, 1, 1.5, ('cool', 'other')]) def test_udf_with_non_string_module_name(self, udf): ''' Tests for incorrect udf module name types ''' with pytest.raises(e.ParamError): self.as_connection.udf_remove(udf) @pytest.mark.xfail(reason="These do not raise errors") @pytest.mark.parametrize( "policy", [False, 'policy', 5, (), []] ) def test_udf_remove_with_invalid_policy_type(self, policy): ''' Tests for incorrect policy argument types ''' with pytest.raises(TypeError): self.as_connection.udf_remove('example.lua', policy) ``` #### File: test/old_tests/_test_apply.py ```python import pytest import sys from .test_base_class import TestBaseClass aerospike = pytest.importorskip("aerospike") try: import aerospike except: 
print("Please install aerospike python client.") sys.exit(1) class TestApply(TestBaseClass): def setup_class(cls): hostlist, user, password = TestBaseClass.get_hosts() config = {'hosts': hostlist} if user is None and password is None: TestApply.client = aerospike.client(config).connect() else: TestApply.client = aerospike.client(config).connect(user, password) TestApply.skip_old_server = True versioninfo = TestApply.client.info('version') for keys in versioninfo: for value in versioninfo[keys]: if value is not None: versionlist = value[ value.find("build") + 6:value.find("\n")].split(".") if int(versionlist[0]) >= 3 and int(versionlist[1]) >= 6: TestApply.skip_old_server = False policy = {} TestApply.client.index_integer_create('test', 'demo', 'age', 'age_index', policy) policy = {} TestApply.client.index_integer_create('test', 'demo', 'age1', 'age_index1', policy) policy = {} filename = "sample.lua" udf_type = 0 TestApply.client.udf_put(filename, udf_type, policy) filename = "test_record_udf.lua" TestApply.client.udf_put(filename, udf_type, policy) filename = "udf_basic_ops.lua" TestApply.client.udf_put(filename, udf_type, policy) def teardown_class(cls): policy = {} TestApply.client.index_remove('test', 'age_index', policy) TestApply.client.index_remove('test', 'age_index1', policy) policy = {'timeout': 0} module = "sample.lua" TestApply.client.udf_remove(module, policy) module = "udf_basic_ops.lua" TestApply.client.udf_remove(module, policy) TestApply.client.close() def setup_method(self, method): """ Setup method. """ for i in range(5): key = ('test', 'demo', i) rec = { 'name': ['name%s' % (str(i))], 'addr': 'name%s' % (str(i)), 'age': i, 'no': i, 'basic_map': {"k30": 6, "k20": 5, "k10": 1} } TestApply.client.put(key, rec) def teardown_method(self, method): """ Teardown method. """ for i in range(5): key = ('test', 'demo', i) TestApply.client.remove(key) def test_apply_with_no_paramters(self): """ Invoke apply() without any mandatory parameters. 
""" with pytest.raises(TypeError) as typeError: TestApply.client.apply() assert "Required argument 'key' (pos 1) not found" in str( typeError.value) def test_apply_with_correct_parameters(self): """ Invoke apply() with correct arguments """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', 'car']) (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', 'car'] assert retval == 0 def test_apply_with_correct_parameters_float_argument(self): """ Invoke apply() with correct arguments with a floating value in the list of arguments """ if TestApply.skip_old_server is True: pytest.skip( "Server does not support apply on float type as lua argument") key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', 5.434]) (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', 5.434] assert retval == 0 def test_apply_with_none_parameter(self): """ Invoke apply() with a None argument """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', None]) (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', None] assert retval == 0 def test_apply_with_policy(self): """ Invoke apply() with policy """ policy = {'timeout': 1000} key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', 'car'], policy) (key, _, bins) = TestApply.client.get(key) assert retval == 0 assert bins['name'] == ['name1', 'car'] def test_apply_with_incorrect_policy(self): """ Invoke apply() with incorrect policy """ policy = {'timeout': 0.1} key = ('test', 'demo', 1) try: TestApply.client.apply(key, 'sample', 'list_append', ['name', 'car'], policy) except aerospike.exception.ParamError as exception: assert exception.code == -2 assert exception.msg == 'timeout is invalid' def test_apply_with_extra_argument(self): """ Invoke apply() with extra argument """ policy = {'timeout': 1000} key = ('test', 'demo', 1) with pytest.raises(TypeError) as typeError: TestApply.client.apply(key, 'sample', 'list_append', ['name', 'car'], policy, "") assert "apply() takes at most 5 arguments (6 given)" in str( typeError.value) def test_apply_with_incorrect_bin(self): """ Invoke apply() with incorrect bin """ policy = {'timeout': 1000} key = ('test', 'demo', 1) try: TestApply.client.apply(key, 'sample', 'list_append', ['addr', 'car'], policy) except aerospike.exception.UDFError as exception: assert exception.code == 100 def test_apply_with_empty_module_function(self): """ Invoke apply() with empty module and function """ try: key = ('test', 'demo', 1) TestApply.client.apply(key, '', '', ['name', 'car']) except aerospike.exception.UDFError as exception: assert exception.code == 100 assert exception.msg == 'UDF: Execution Error 1' def test_apply_with_incorrect_module(self): """ Invoke apply() with incorrect module """ try: key = ('test', 'demo', 1) TestApply.client.apply(key, 'samplewrong', 'list_append', ['name', 'car']) except aerospike.exception.UDFError as exception: assert exception.code == 100 assert exception.msg == 'UDF: Execution Error 1' def test_apply_with_incorrect_function(self): """ Invoke apply() with incorrect function """ try: key = ('test', 'demo', 1) TestApply.client.apply(key, 'sample', 'list_prepend', ['name', 'car']) except aerospike.exception.UDFError as exception: assert exception.code == 100 assert exception.msg == 'function not found' def test_apply_with_key_as_string(self): """ Invoke apply() with key as string """ 
try: TestApply.client.apply( "", 'sample', 'list_append', ['name', 'car']) except aerospike.exception.ParamError as exception: assert exception.code == -2 assert exception.msg == 'key is invalid' def test_apply_with_incorrect_ns_set(self): """ Invoke apply() with incorrect ns and set """ try: key = ('test1', 'demo1', 1) TestApply.client.apply(key, 'sample', 'list_prepend', ['name', 'car']) except aerospike.exception.NamespaceNotFound as exception: assert exception.code == 20 assert exception.msg == 'AEROSPIKE_ERR_NAMESPACE_NOT_FOUND' def test_apply_with_key_as_none(self): """ Invoke apply() with key as none """ try: TestApply.client.apply( None, 'sample', 'list_append', ['name', 'car']) except aerospike.exception.ParamError as exception: assert exception.code == -2 assert exception.msg == 'key is invalid' def test_apply_with_append_integer(self): """ Invoke apply() with append an integer """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', 1]) assert retval == 0 (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', 1] def test_apply_with_append_list(self): """ Invoke apply() with append an list """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', [1, 2]]) assert retval == 0 (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', [1, 2]] def test_apply_with_integer(self): """ Invoke apply() with integer """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'test_record_udf', 'bin_udf_operation_integer', ['age', 2, 20]) assert retval == 23 (key, _, bins) = TestApply.client.get(key) assert bins['age'] == 23 def test_apply_with_string(self): """ Invoke apply() with string """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'test_record_udf', 'bin_udf_operation_string', ['addr', " world"]) assert retval == "name1 world" (key, _, bins) = TestApply.client.get(key) assert bins['addr'] == "name1 world" def test_apply_with_map(self): """ Invoke apply() with map """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'test_record_udf', 'map_iterate', ['basic_map', 555]) assert retval is None (key, _, bins) = TestApply.client.get(key) assert bins['basic_map'] == {"k30": 555, "k20": 555, "k10": 555} def test_apply_with_record(self): """ Invoke apply() with record """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'test_record_udf', 'udf_returns_record', []) assert retval is not None def test_apply_with_bytearray(self): """ Invoke apply() with a bytearray as a argument """ key = ('test', 'demo', 'apply_insert') TestApply.client.apply(key, 'udf_basic_ops', 'create_record', [bytearray("asd;as[d'as;d", "utf-8")]) (key, _, bins) = TestApply.client.get(key) assert bins == {'bin': bytearray(b"asd;as[d\'as;d")} TestApply.client.remove(key) def test_apply_with_extra_argument_to_lua(self): """ Invoke apply() with extra argument to lua """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', 'car', 1]) assert retval == 0 (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', 'car'] def test_apply_with_extra_argument_in_lua(self): """ Invoke apply() with extra argument in lua """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append_extra', ['name', 'car']) assert retval == 0 (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', 'car'] def test_apply_with_no_argument_in_lua(self): """ Invoke apply() with no 
argument in lua """ key = ('test', 'demo', 1) with pytest.raises(TypeError) as typeError: TestApply.client.apply(key, 'sample', 'list_append_extra') assert "Required argument 'args' (pos 4) not found" in str( typeError.value) def test_apply_with_append_list2(self): """ Invoke apply() with append an list """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, 'sample', 'list_append', ['name', [1, 2]]) assert retval == 0 (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', [1, 2]] def test_apply_with_unicode_module_and_function(self): """ Invoke apply() with unicode module and function """ key = ('test', 'demo', 1) retval = TestApply.client.apply(key, u'sample', u'list_append', ['name', 'car']) (key, _, bins) = TestApply.client.get(key) assert bins['name'] == ['name1', 'car'] assert retval == 0 def test_apply_with_correct_parameters_without_connection(self): """ Invoke apply() with correct arguments without connection """ key = ('test', 'demo', 1) config = {'hosts': [('127.0.0.1', 3000)]} client1 = aerospike.client(config) try: client1.apply(key, 'sample', 'list_append', ['name', 'car']) except aerospike.exception.ClusterError as exception: assert exception.code == 11 assert exception.msg == 'No connection to aerospike cluster' ``` #### File: test/old_tests/_test_list_get.py ```python import pytest import sys import random from .test_base_class import TestBaseClass from aerospike import exception as e aerospike = pytest.importorskip("aerospike") try: import aerospike except: print("Please install aerospike python client.") sys.exit(1) class TestListGet(object): def setup_class(cls): """ Setup method. """ hostlist, user, password = TestBaseClass.get_hosts() config = {'hosts': hostlist} if user is None and password is None: TestListGet.client = aerospike.client(config).connect() else: TestListGet.client = aerospike.client( config).connect(user, password) def teardown_class(cls): TestListGet.client.close() def setup_method(self, method): for i in range(5): key = ('test', 'demo', i) rec = {'name': 'name%s' % (str(i)), 'contact_no': [i, i + 1], 'city': ['Pune', 'Dehli']} TestListGet.client.put(key, rec) key = ('test', 'demo', 2) TestListGet.client.list_append(key, "contact_no", [45, 50, 80]) def teardown_method(self, method): """ Teardown method. """ # time.sleep(1) for i in range(5): key = ('test', 'demo', i) TestListGet.client.remove(key) def test_list_get_with_correct_paramters(self): """ Invoke list_get() get string with correct parameters """ key = ('test', 'demo', 1) val = TestListGet.client.list_get(key, "city", 0) assert val == 'Pune' def test_list_get_with_correct_policy(self): """ Invoke list_get() get with correct policy """ key = ('test', 'demo', 2) policy = { 'timeout': 1000, 'retry': aerospike.POLICY_RETRY_ONCE, 'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER } val = TestListGet.client.list_get(key, 'contact_no', 2, {}, policy) assert val == [45, 50, 80] def test_list_get_with_no_parameters(self): """ Invoke list_get() without any mandatory parameters. 
""" with pytest.raises(TypeError) as typeError: TestListGet.client.list_get() assert "Required argument 'key' (pos 1) not found" in str( typeError.value) def test_list_get_with_incorrect_policy(self): """ Invoke list_get() with incorrect policy """ key = ('test', 'demo', 1) policy = { 'timeout': 0.5 } try: TestListGet.client.list_get(key, "contact_no", 0, {}, policy) except e.ParamError as exception: assert exception.code == -2 assert exception.msg == "timeout is invalid" def test_list_get_with_nonexistent_key(self): """ Invoke list_get() with non-existent key """ charSet = 'abcdefghijklmnopqrstuvwxyz1234567890' minLength = 5 maxLength = 30 length = random.randint(minLength, maxLength) key = ('test', 'demo', ''.join(map(lambda unused: random.choice(charSet), range(length))) + ".com") try: TestListGet.client.list_get(key, "contact_no", 0) except e.RecordNotFound as exception: assert exception.code == 2 def test_list_get_with_extra_parameter(self): """ Invoke list_get() with extra parameter. """ key = ('test', 'demo', 1) policy = {'timeout': 1000} with pytest.raises(TypeError) as typeError: TestListGet.client.list_get(key, "contact_no", 1, {}, policy, "") assert "list_get() takes at most 5 arguments (6 given)" in str( typeError.value) def test_list_get_policy_is_string(self): """ Invoke list_get() with policy is string """ key = ('test', 'demo', 1) try: TestListGet.client.list_get(key, "contact_no", 1, {}, "") except e.ParamError as exception: assert exception.code == -2 assert exception.msg == "policy must be a dict" def test_list_get_key_is_none(self): """ Invoke list_get() with key is none """ try: TestListGet.client.list_get(None, "contact_no", 0) except e.ParamError as exception: assert exception.code == -2 assert exception.msg == "key is invalid" def test_list_get_bin_is_none(self): """ Invoke list_get() with bin is none """ key = ('test', 'demo', 1) try: TestListGet.client.list_get(key, None, 1) except e.ParamError as exception: assert exception.code == -2 assert exception.msg == "Bin name should be of type string" def test_list_get_with_negative_index(self): """ Invoke list_get() with negative index """ key = ('test', 'demo', 1) try: TestListGet.client.list_get(key, "contact_no", -56) except e.InvalidRequest as exception: assert exception.code == 4 def test_list_get_meta_type_integer(self): """ Invoke list_get() with metadata input is of type integer """ key = ('test', 'demo', 1) try: TestListGet.client.list_get(key, "contact_no", 0, 888) except e.ParamError as exception: assert exception.code == -2 assert exception.msg == "Metadata should be of type dictionary" def test_list_get_index_type_string(self): """ Invoke list_get() with index is of type string """ key = ('test', 'demo', 1) with pytest.raises(TypeError) as typeError: TestListGet.client.list_get(key, "contact_no", "Fifth") assert "an integer is required" in str(typeError.value) ``` #### File: test/old_tests/_test_select.py ```python import pytest import sys from .test_base_class import TestBaseClass from aerospike import exception as e aerospike = pytest.importorskip("aerospike") try: import aerospike except: print("Please install aerospike python client.") sys.exit(1) class TestSelect(TestBaseClass): def setup_class(cls): """ Setup class. 
""" hostlist, user, password = TestBaseClass.get_hosts() config = {'hosts': hostlist} if user is None and password is None: TestSelect.client = aerospike.client(config).connect() else: TestSelect.client = aerospike.client( config).connect(user, password) def teardown_class(cls): TestSelect.client.close() def setup_method(self, method): """ Setup method. """ key = ('test', 'demo', 1) rec = { 'a': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")], 'b': {"key": "asd';q;'1';"}, 'c': 1234, 'd': '!@#@#$QSDAsd;as', 'n': None } TestSelect.client.put(key, rec) def teardown_method(self, method): """ Teardoen method. """ key = ("test", "demo", 1) TestSelect.client.remove(key) def test_select_with_key_and_empty_list_of_bins_to_select(self): key = ("test", "demo", 1) try: key, meta, bins = TestSelect.client.select(key, []) assert bins == {} assert meta is not None assert key is not None except e.InvalidRequest: pass def test_select_with_key_and_bins(self): key = ("test", "demo", 1) bins_to_select = ['a'] key, meta, bins = TestSelect.client.select(key, bins_to_select) assert bins == { 'a': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")] } assert meta is not None assert key is not None def test_select_without_any_parameter(self): with pytest.raises(TypeError) as typeError: TestSelect.client.select() assert "Required argument 'key' (pos 1) not found" in str( typeError.value) def test_select_with_none_key(self): bins_to_select = ['a'] try: TestSelect.client.select(None, bins_to_select) except e.ParamError as exception: assert exception.code == -2 def test_select_with_none_policy(self): key = ("test", "demo", 1) bins_to_select = ['b'] key, meta, bins = TestSelect.client.select(key, bins_to_select, None) assert bins == {'b': {"key": "asd';q;'1';"}, } assert meta is not None assert key is not None def test_select_with_none_bins_to_select(self): key = ("test", "demo", 1) bins_to_select = None try: key, _, _ = TestSelect.client.select(key, bins_to_select) except e.ParamError as exception: assert exception.code == -2 assert exception.msg == 'not a list or tuple' def test_select_with_non_existent_key(self): key = ("test", "demo", 'non-existent') bins_to_select = ['a', 'b'] try: key, meta, bins = TestSelect.client.select(key, bins_to_select) """ We are making the api backward compatible. In case of RecordNotFound an exception will not be raised Instead Ok response is returned withe the meta as None. This might change with further releases. 
""" assert key is not None assert meta is None assert bins is None except e.RecordNotFound as exception: assert exception.code == 2 assert exception.msg == 'AEROSPIKE_ERR_RECORD_NOT_FOUND' def test_select_with_key_and_single_bin_to_select_not_a_list(self): key = ("test", "demo", 1) bin_to_select = 'a' # Not a list try: key, _, _ = TestSelect.client.select(key, bin_to_select) except e.ParamError as exception: assert exception.code == -2 assert exception.msg == 'not a list or tuple' def test_select_with_key_and_multiple_bins_to_select(self): key = ("test", "demo", 1) bins_to_select = ['c', 'd'] key, meta, bins = TestSelect.client.select(key, bins_to_select) assert bins == {'c': 1234, 'd': '!@#@#$QSDAsd;as'} assert meta is not None def test_select_with_key_and_multiple_bins_to_select_policy_key_send(self): key = ("test", "demo", 1) bins_to_select = ['c', 'd'] policy = {'timeout': 1000, 'key': aerospike.POLICY_KEY_SEND} key, meta, bins = TestSelect.client.select(key, bins_to_select, policy) assert bins == {'c': 1234, 'd': '!@#@#$QSDAsd;as'} assert key == ('test', 'demo', 1, bytearray( b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8') ) assert meta is not None def test_select_with_key_and_multiple_bins_to_select_policy_key_digest(self ): key = ('test', 'demo', None, bytearray("asd;as[d'as;djk;uyfl", "utf-8")) rec = { 'a': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")], 'b': {"key": "asd';q;'1';"}, 'c': 1234, 'd': '!@#@#$QSDAsd;as' } TestSelect.client.put(key, rec) bins_to_select = ['c', 'd'] policy = {'timeout': 1000, 'key': aerospike.POLICY_KEY_DIGEST} key, meta, bins = TestSelect.client.select(key, bins_to_select, policy) assert bins == {'c': 1234, 'd': '!@#@#$QSDAsd;as'} assert key == ('test', 'demo', None, bytearray(b"asd;as[d\'as;djk;uyfl")) assert meta is not None key = ('test', 'demo', None, bytearray("asd;as[d'as;djk;uyfl", "utf-8")) TestSelect.client.remove(key) def test_select_with_key_and_combination_of_existent_and_non_existent_bins_to_select( self ): key = ("test", "demo", 1) bins_to_select = ['c', 'd', 'n'] key, meta, bins = TestSelect.client.select(key, bins_to_select) assert bins == {'c': 1234, 'd': '!@#@#$QSDAsd;as', 'n': None} assert meta is not None def test_select_with_key_and_non_existent_bin_in_middle(self): key = ("test", "demo", 1) bins_to_select = ['c', 'e', 'd'] key, meta, bins = TestSelect.client.select(key, bins_to_select) assert bins == {'c': 1234, 'd': '!@#@#$QSDAsd;as'} assert meta is not None def test_select_with_key_and_non_existent_bins_to_select(self): key = ("test", "demo", 1) bins_to_select = ['e', 'f'] key, _, bins = TestSelect.client.select(key, bins_to_select) assert bins == {} def test_select_with_unicode_value(self): key = ('test', 'demo', 'aa') rec = { 'a': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")], 'b': {"key": "asd';q;'1';"}, 'c': 1234, 'd': '!@#@#$QSDAsd;as' } assert 0 == TestSelect.client.put(key, rec) bins_to_select = ['a'] key, meta, bins = TestSelect.client.select(key, bins_to_select) assert bins == { 'a': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")] } assert meta is not None assert key is not None key = ('test', 'demo', 'aa') TestSelect.client.remove(key) def test_select_with_key_and_bins_without_connection(self): key = ("test", "demo", 1) bins_to_select = ['a'] try: key, _, _ = TestSelect.client.select(key, bins_to_select) except e.ClusterError as exception: assert exception.code == 11 assert exception.msg == 'No connection to aerospike cluster' ```
{ "source": "jensengroup/DHA_HTVS", "score": 2 }
#### File: DHA_HTVS/run_sqm_calculations/control_sqm_mol_search.py
```python
import time
import os
import sys
import textwrap

import pandas as pd
import numpy as np

sys.path.append("./QMC")
from qmconf import QMConf


def qsub_prep(batchname, script_path, cpus, mem):
    """ """
    pwd = os.getcwd()

    qsub_file = '''\
    #!/bin/sh
    #SBATCH --job-name={0}
    #SBATCH --cpus-per-task={1}
    #SBATCH --mem={2}
    #SBATCH --ntasks=1
    #SBATCH --error={3}/{0}.stderr
    #SBATCH --output={3}/{0}.stdout
    #SBATCH --time=8:00:00
    #SBATCH --partition=kemi1
    #SBATCH --no-requeue

    cd /scratch/$SLURM_JOB_ID

    # copy batch file
    cp {3}/{0}.csv .

    # run python code
    /groups/kemi/koerstz/anaconda3/envs/rdkit-env/bin/python {4} {0}.csv {1}

    # copy data back
    #tar -czf {0}_output.tar.gz *xyz
    #cp {0}_output.tar.gz *pkl {3}
    #cp *out {3}
    cp *pkl {3}
    '''.format(batchname, cpus, mem, pwd, script_path)

    with open(batchname + "_qsub.tmp", 'w') as qsub:
        qsub.write(textwrap.dedent(qsub_file))

    return batchname + "_qsub.tmp"


def submit_job(csv, script, mem, cpus, nodes):
    qsub_name = qsub_prep(csv, script, cpus, mem)
    batch_id = os.popen("sbatch " + qsub_name).read()
    batch_id = batch_id.strip().split()[-1]
    return int(batch_id)


def run_calculations(csv_names, script, mem, cpus, nodes):
    ''' '''
    submitted_jobs = set()
    for csv in csv_names:
        batch_id = submit_job(csv, script, mem, cpus, nodes)
        submitted_jobs.add(batch_id)

        if len(submitted_jobs) >= nodes:
            while True:
                output = os.popen("squeue -u koerstz").readlines()[1:]
                all_running_jobs = set([int(job.split()[0]) for job in output])

                if len(all_running_jobs & submitted_jobs) >= nodes:  # intersect
                    time.sleep(10)
                else:
                    # remove finished jobs
                    finished_jobs = submitted_jobs - all_running_jobs
                    print('finished job: ', finished_jobs)
                    print('submitted job ', submitted_jobs)

                    for job in finished_jobs:
                        submitted_jobs.remove(job)
                    print('submitted job ', submitted_jobs)

                    break


if __name__ == "__main__":

    # input params
    cpus = 2
    mem = "4GB"
    nodes = 250
    chunk_size = 1

    script = './sqm_mol_search.py'
    data_file = sys.argv[1]

    ##########################################################################
    #
    ##########################################################################

    # import data
    data = pd.read_csv(data_file)

    # split data into chunks
    chunked_data = [data[i:i+chunk_size]
                    for i in range(0, data.shape[0], chunk_size)]

    chunk_names = list()
    for idx, chunk in enumerate(chunked_data):
        chunk_name = "smiles_batch-{}".format(idx)
        chunk.to_csv(chunk_name + ".csv", index=False)
        chunk_names.append(chunk_name)

    # run calculations on nodes
    run_calculations(chunk_names, script, mem, cpus, nodes)
```
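The `__main__` block above is the full driver: it splits the input CSV into batches and throttles SLURM submissions so at most `nodes` jobs are queued at once. For a quick smoke test, `run_calculations` can also be called directly on batch files that already exist; the sketch below uses made-up batch names and a small `nodes` value (assumptions for illustration, not project defaults).

```python
# Hypothetical direct call to run_calculations(); the batch names and the
# nodes value are placeholders chosen for illustration only.
batches = ["smiles_batch-0", "smiles_batch-1"]

run_calculations(batches,
                 script='./sqm_mol_search.py',
                 mem="4GB",
                 cpus=2,
                 nodes=2)  # keep at most two SLURM jobs in flight
```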
{ "source": "jensengroup/elementary_step_om", "score": 2 }
#### File: elementary_step_om/elementary_step_om/chem.py
```python
import copy
import itertools
import multiprocessing as mp

import numpy as np

from rdkit.Geometry import Point3D
from rdkit import Chem
from rdkit.Chem import rdmolfiles, AllChem, rdmolops
from rdkit.Chem import rdChemReactions
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers
from rdkit.Chem.EnumerateStereoisomers import StereoEnumerationOptions

import hashlib


def reassign_atom_idx(mol):
    """ Reassigns the RDKit mol object atomid to atom mapped id """
    renumber = [(atom.GetIdx(), atom.GetAtomMapNum()) for atom in mol.GetAtoms()]
    new_idx = [idx[0] for idx in sorted(renumber, key=lambda x: x[1])]

    mol = Chem.RenumberAtoms(mol, new_idx)
    rdmolops.AssignStereochemistry(mol, force=True)

    return mol


def coords_to_AC(symbols, coords, covalent_factor: float = 1.3):
    """ """
    pt = Chem.GetPeriodicTable()

    new_coords = np.asarray(coords, dtype=np.float32)
    num_atoms = len(symbols)
    new_ac = np.zeros((num_atoms, num_atoms), dtype=np.int8)

    for i in range(num_atoms):
        for j in range(num_atoms):
            if i > j:
                atom_num_i = pt.GetAtomicNumber(symbols[i])
                atom_num_j = pt.GetAtomicNumber(symbols[j])

                Rcov_i = pt.GetRcovalent(atom_num_i) * covalent_factor
                Rcov_j = pt.GetRcovalent(atom_num_j) * covalent_factor

                dist = np.linalg.norm(new_coords[i] - new_coords[j])
                if dist < Rcov_i + Rcov_j:
                    new_ac[i, j] = new_ac[j, i] = 1

    return new_ac


class MoleculeException(Exception):
    """ An exception that is raised by the Molecule class """


class ReactionException(Exception):
    """ An exception that is raised by the Reaction class """


class BaseMolecule:
    """
    A base class for molecules. Encapsulates an RDKit mol object.

    The object is hashable by the SMILES (should be changed to the graph hash)
    and is therefore comparable through the equality operator and in sets.
    """

    def __init__(self, molblock: str = None):
        if molblock is None:
            raise MoleculeException("Need to provide a MolBlock")

        self.rd_mol = None
        self.conformers = []
        self._calculator = None
        self.results = {}

        self._mol_hash = None

        self._inititalize_molecule(molblock)

    def __repr__(self):
        return f"{self.__class__.__name__}(label={self.label})"

    def __eq__(self, other):
        if not type(self) is type(other):
            raise TypeError(
                f"Comparing {self.__class__.__name__} to {other.__class__.__name__}"
            )
        return self.__hash__() == other.__hash__()

    def __hash__(self) -> int:
        if self._mol_hash is None:
            m = hashlib.blake2b()
            m.update(Chem.MolToSmiles(self.rd_mol).encode("utf-8"))
            self._mol_hash = int(str(int(m.hexdigest(), 16))[:32])
        return self._mol_hash

    def _inititalize_molecule(self, molblock):
        """ """
        self.rd_mol = Chem.MolFromMolBlock(molblock, sanitize=False)
        Chem.SanitizeMol(self.rd_mol)

        # Does it have a 3D conformer?
if all(self.rd_mol.GetConformer().GetPositions()[:, 2] == 0.0): self.rd_mol.RemoveAllConformers() else: self.conformers.append(Conformer(molblock=molblock)) self.rd_mol.RemoveAllConformers() AllChem.Compute2DCoords(self.rd_mol) @property def molblock(self) -> str: """ molblock are the MolBlock from self.rd_mol""" return Chem.MolToMolBlock(self.rd_mol) @property def label(self) -> str: return self.__class__.__name__ + "_" + str(self.__hash__()) @property def atom_symbols(self) -> list: return [atom.GetSymbol() for atom in self.rd_mol.GetAtoms()] @property def ac_matrix(self): return rdmolops.GetAdjacencyMatrix(self.rd_mol) @property def calculator(self): return self._calculator @calculator.setter def calculator(self, calc_instance): for conf in self.conformers: conf.calculator = calc_instance @classmethod def from_molfile(cls, file): """ Initialize Molecule from a Molfile """ if not file.endswith(("mol", "sdf")): raise TypeError("Only works with mol/sdf files") suppl = rdmolfiles.SDMolSupplier(file, removeHs=False, sanitize=False) first_mol = next(suppl) obj = cls(molblock=Chem.MolToMolBlock(first_mol)) return obj # AllChem.Compute2DCoords(first_mol) # obj.molecule = first_mol # for conf_idx in range(len(suppl)): # obj._conformers.append( # Conformer(sdf=suppl.GetItemText(conf_idx), # label=f"{file.split('.')[0]}-{conf_idx}") # ) # obj._2d_structure = False # return obj @classmethod def from_rdkit_mol(cls, rdkit_mol): """ Initialize Molecule from a RDKit mol object. """ n_confs = rdkit_mol.GetConformers() if len(n_confs) <= 1: # If no 3D conformers. return cls(molblock=Chem.MolToMolBlock(rdkit_mol)) else: raise NotImplementedError("RDKit have more than 1 Conformer.") def _remove_pseudo_chirality(self) -> None: """ Reassign stereochemistry for the RDKit mol object """ rdmolops.AssignStereochemistry( self.rd_mol, cleanIt=True, flagPossibleStereoCenters=True, force=True ) def add_rdmol(self, molecule) -> None: """ Adds an rdmol fragment to the molecule. 
""" if not isinstance(molecule, Chem.rdchem.Mol): raise RuntimeError(f"can't add {type(molecule)} to {self.__class__.__name__}") molecule = copy.deepcopy(molecule) self.rd_mol.RemoveAllConformers() molecule.RemoveAllConformers() # Reset attributes if self.has_atom_mapping(): max_idx = 0 for atom in self.rd_mol.GetAtoms(): if atom.GetAtomMapNum() > max_idx: max_idx = atom.GetAtomMapNum() if molecule.GetAtomWithIdx(0).GetAtomMapNum() > 0: for atom in molecule.GetAtoms(): atom.SetAtomMapNum(atom.GetAtomMapNum() + max_idx) else: for atom in molecule.GetAtoms(): atom.SetAtomMapNum(atom.GetIdx() + max_idx + 1) self.rd_mol = Chem.CombineMols(self.rd_mol, molecule) self.rd_mol = reassign_atom_idx(self.rd_mol) AllChem.Compute2DCoords(self.rd_mol) self.conformers = [] self._calculator = None self.results = {} self._mol_hash = None def _remove_atom_mapping(self) -> None: """ Remove atom mapping from RDKit mol object """ [atom.SetAtomMapNum(0) for atom in self.rd_mol.GetAtoms()] def has_atom_mapping(self) -> None: """ Determines is the molecule has atom mappings """ for atom in self.rd_mol.GetAtoms(): if atom.GetAtomMapNum() > 0: return True return False def get_fragments(self): """ Split molecule into fragments """ fragments = [] for frag in Chem.GetMolFrags(self.rd_mol, asMols=True, sanitizeFrags=False): fragments.append(Fragment.from_rdkit_mol(frag)) return fragments def num_rotatable_bond(self): """ Calculates the number of rotatable bonds in the fragment """ if self.rd_mol is None: raise RuntimeError("Fragment has no RDKit mol") rot_bonds_smarts = [ "[!#1]~[!$(*#*)&!D1]-!@[!$(*#*)&!D1]~[!#1]", "[*]~[*]-[O,S]-[#1]", "[*]~[*]-[NX3;H2]-[#1]", ] num_dihedral = 0 for bond_smart in rot_bonds_smarts: dihedral_template = Chem.MolFromSmarts(bond_smart) dihedrals = self.rd_mol.GetSubstructMatches(dihedral_template) num_dihedral += len(dihedrals) return num_dihedral def run_calculations(self, parallel_confs: int = 1) -> None: """ Run the calculation defined by the calculator object on all conformers. """ with mp.Pool(int(parallel_confs)) as pool: updated_confs = pool.map(self._calculation_worker, self.conformers) self.conformers = updated_confs def _calculation_worker(self, conf): conf.run_calculation() return conf def embed_molecule( self, confs_pr_frag: int = 1, seed: int = 42, refine_calculator=None, overwrite: bool = True, direction: list = [0.8, 0, 0], ) -> None: """ If more than one fragment, the fragments are embedded individually and then merged afterwards. 
""" def merge_fragments(frag_confs, conf_num: int) -> Conformer: """ """ nfrags = len(frag_confs) if nfrags == 0: merged_conformer = frag_confs[0] else: merged_conformer = Chem.MolFromMolBlock( frag_confs[0].molblock, sanitize=False ) for frag_conf in frag_confs[1:]: frag_natoms = len(frag_conf.atom_symbols) new_coords = ( frag_conf.coordinates + np.array(direction) * nfrags * frag_natoms ) frag_conf.coordinates = new_coords frag_conf = Chem.MolFromMolBlock(frag_conf.molblock, sanitize=False) merged_conformer = Chem.CombineMols(merged_conformer, frag_conf) merged_conformer = reassign_atom_idx(merged_conformer) merged_conformer = Conformer(molblock=Chem.MolToMolBlock(merged_conformer)) if refine_calculator is not None: merged_conformer.calculator = refine_calculator merged_conformer.run_calculation() return merged_conformer if overwrite: self.conformers = [] fragments = self.get_fragments() fragment_confs = [] for frag in fragments: frag.make_fragment_conformers(nconfs=confs_pr_frag, seed=seed) fragment_confs.append(frag.conformers) for conf_num, frag_confs_set in enumerate(itertools.product(*fragment_confs)): self.conformers.append(merge_fragments(frag_confs_set, conf_num)) def prune_conformers(self, energy_cutoff: float = 0.0, sort: bool = True): """ Remove conformers with energies that are above the `energy_cutoff` (kcal/mol) meassuret from the above the most stable conformer. If sort = True the most stable conformer is put at idx 0. """ energy_most_stable_conf = 9999.9 for conf in self.conformers: if len(conf.results) != 1: if conf.results['energy'] < energy_most_stable_conf: energy_most_stable_conf = conf.results['energy'] # No conformer converged - just save one. if energy_most_stable_conf == 9999.9: # No conformer converged just save one if len(self.conformers) != 0: self.conformers = [self.conformers[0]] else: # Save conformers if below cutoff energy_most_stable_conf *= 627.503 # convert to kcal/mol conf_cutoff = energy_most_stable_conf + energy_cutoff pruned_conformers = [] for conf in self.conformers: if conf.results['converged']: tmp_energy_kcal = conf.results['energy'] * 627.503 if tmp_energy_kcal <= conf_cutoff: pruned_conformers.append((tmp_energy_kcal, conf)) if sort: # sort confs w.r.t. the conformer energy. sorted(pruned_conformers, key=lambda x: x[0]) self.conformers = [conf[1] for conf in pruned_conformers] class Molecule(BaseMolecule): """ """ def __init__(self, molblock: str = None) -> None: """ """ super().__init__(molblock=molblock) # Is the graph mapped? if self.has_atom_mapping(): self.unmap_molecule() def unmap_molecule(self): """ Remove both atom mapping and reassign the sterochemistry.""" self._remove_atom_mapping() self._remove_pseudo_chirality() def get_mapped_molecule(self): """ Assign atom mapping as atom idx + 1 and assign random pseudochirality returns a MappedMolecule. """ tmp_rdmol = copy.deepcopy(self.rd_mol) [atom.SetAtomMapNum(atom.GetIdx() + 1) for atom in tmp_rdmol.GetAtoms()] rdmolops.AssignStereochemistry( tmp_rdmol, cleanIt=True, flagPossibleStereoCenters=True, force=True ) opts = StereoEnumerationOptions(onlyUnassigned=False, unique=False) tmp_rdmol = next(EnumerateStereoisomers(tmp_rdmol, options=opts)) tmp_rdmol = Chem.MolFromMolBlock(Chem.MolToMolBlock(tmp_rdmol), sanitize=False) return MappedMolecule(molblock=Chem.MolToMolBlock(tmp_rdmol)) class MappedMolecule(BaseMolecule): """ """ def __init__(self, molblock=None): super().__init__(molblock=molblock) # Is the graph mapped? 
if self.has_atom_mapping(): self.rd_mol = reassign_atom_idx(self.rd_mol) else: raise MoleculeException("Atoms in MolBlock are not mapped") def get_unmapped_molecule(self) -> Molecule: """ Remove atom mapping and reassign the sterochemistry. Return Unmapped Molecule """ tmp_mol = copy.deepcopy(self) tmp_mol._remove_atom_mapping() tmp_mol._remove_pseudo_chirality() return Molecule(molblock=Chem.MolToMolBlock(tmp_mol.rd_mol)) class Solvent: """ Base class that represents a solvent. """ def __init__(self, smiles=None, n_solvent=0, active=0): """ """ self._smiles = smiles self._n_solvent_molecules = n_solvent self._nactive = active class Fragment(BaseMolecule): """ The Fragment class is a special `Molecule` with only one Fragment. Doesn't alter the atom mapping. """ # TODO: This doesn't perform an UFF minimization. This is a problem when dealing # with organometalics. def _embed_fragment(self, frag_rdkit, nconfs=20, seed=20): """ """ p = AllChem.ETKDGv3() p.useRandomCoords = True p.randomSeed = int(seed) # Always assign stereochemistry when embedding. rdmolops.AssignStereochemistry(frag_rdkit, force=False) try: AllChem.EmbedMultipleConfs(frag_rdkit, numConfs=nconfs, params=p) except RuntimeError: print(f"RDKit Failed to embed: {self.label}.") return [] # # Worst code ever... # smarts = Chem.MolToSmarts(frag_rdkit) # for patt in ["\d", ""]: # smarts = re.sub(f":\d{patt}", '', smarts) # smarts_ob = pybel.Smarts(smarts) # ## for conf_idx in range(frag_rdkit.GetNumConformers()): conf_molblock = Chem.MolToMolBlock(frag_rdkit, confId=conf_idx) conf = Conformer(molblock=conf_molblock) # coords = np.zeros((frag_rdkit.GetNumAtoms(), 3)) # obmol = pybel.readstring('mdl', conf.molblock) # obmol.localopt(forcefield='uff', steps=uffSteps) # This makes it not work # smarts_ob.findall(obmol) # how to i get the atom mapping to work correct?? # for i, atom in enumerate(obmol): # coords[i] = atom.coords self.conformers.append(conf) @staticmethod # Does it need to be static?? def _dative2covalent(inp_mol): """ This doesn't change the atom order can be speed op by only looping over dative bonds. """ # TODO this i wrong for Metals. See Jan messeage on Slack! mol = copy.deepcopy(inp_mol) for bond in mol.GetBonds(): if bond.GetBondType() is Chem.rdchem.BondType.DATIVE: beginAtom = bond.GetBeginAtom().SetFormalCharge(0) bond.SetBondType(Chem.rdchem.BondType.SINGLE) mol.UpdatePropertyCache(strict=False) rdmolops.AssignStereochemistry(mol, force=True) return mol def make_fragment_conformers( self, nconfs: int = 20, seed: int = 42, overwrite: bool = True, ) -> None: """ Makes conformers by embedding conformers using RDKit and refine with openbabel UFF. If molecule is more then one fragment they are moved X*n_atoms awat from each other. openbabel dependency might be eliminated at a later stage. 
overwrite = True - remove old conformers """ if overwrite: self.conformers = [] rdkit_mol = self._dative2covalent(self.rd_mol) self._embed_fragment(rdkit_mol, nconfs=nconfs, seed=seed) class Conformer: """ """ def __init__(self, molblock=None): """ """ self._molblock = molblock self.results = None self._calculator = None self._set_atom_symbols() self._set_init_connectivity() def __repr__(self): return f"{self.__class__.__name__}(label={self.label})" @property def molblock(self) -> str: return self._molblock @property def label(self) -> str: return self.__class__.__name__ + str(hash(self._molblock)) @property def coordinates(self): natoms = len(self.atom_symbols) info = self._molblock.split("\n")[4 : 4 + natoms] coords = np.array([coord.split()[:3] for coord in info], dtype=float) return coords @coordinates.setter def coordinates(self, new_coords): self._update_molblock_coords(new_coords) @property def calculator(self): return self._calculator @calculator.setter def calculator(self, calc_instance): """ """ calc = copy.deepcopy(calc_instance) self._calculator = calc def _set_init_connectivity(self): """ """ # for organometalics consider removing metal. # perhaps move to fragment. sdf = self._molblock.split("\n") del sdf[:3] # del header info_line = sdf[0].strip().split() natoms = int(info_line[0]) nbonds = int(info_line[1]) del sdf[: natoms + 1] # del coord block connectivity = np.zeros((natoms, natoms), dtype=np.int8) for line_idx in range(nbonds): i, j = sdf[line_idx].split()[:2] connectivity[int(i) - 1, int(j) - 1] = 1 connectivity[int(j) - 1, int(i) - 1] = 1 self._init_connectivity = connectivity def _set_atom_symbols(self): """ """ # for organometalics consider removing metal. # perhaps move to fragment. sdf = self._molblock.split("\n") del sdf[:3] # del header info_line = sdf[0].strip().split() natoms = int(info_line[0]) del sdf[0] atom_symbols = [] for line_idx in range(natoms): atom_symbols.append(sdf[line_idx].split()[3]) self.atom_symbols = atom_symbols def _check_connectivity(self, new_coords, covalent_factor=1.3): """ check that the updated structure is ok. """ new_ac = coords_to_AC( self.atom_symbols, new_coords, covalent_factor=covalent_factor ) if np.array_equal(new_ac, self._init_connectivity): return True return False def _update_molblock_coords(self, new_coords): """ """ tmp_mol = Chem.MolFromMolBlock(self.molblock, sanitize=False) conf = tmp_mol.GetConformer() for i in range(tmp_mol.GetNumAtoms()): x, y, z = new_coords[i] conf.SetAtomPosition(i, Point3D(x, y, z)) self._molblock = Chem.MolToMolBlock(tmp_mol) def run_calculation(self, covalent_factor: float = 1.3) -> None: """ Run single calculation on the conformer. """ calc_results = self.calculator(self.atom_symbols, self.coordinates, self.label) # Check that calculation succeded. if calc_results.pop("normal_termination") and calc_results.pop("converged"): self.results = calc_results self.results["converged"] = True # update structure - if the connectivity is ok. if "structure" in self.results: new_coords = calc_results.pop("structure") if self._check_connectivity(new_coords, covalent_factor): self._update_molblock_coords(new_coords) # This is super ugly, you can do better!! 
:) else: self.results = {"converged": False} else: self.results = {"converged": False} def write_xyz(self, filename=None): """ """ xyz_string = "" structure_block = self._molblock.split("V2000")[1] natoms = 0 for line in structure_block.strip().split("\n"): line = line.split() if len(line) < 5: break coords = line[:3] symbol = line[3] xyz_string += f"{symbol} {' '.join(coords)}\n" natoms += 1 xyz_string = f"{natoms} \n \n" + xyz_string if filename is not None: with open(filename, "w") as fout: fout.write(xyz_string) else: return xyz_string class Reaction: """ Contains atom-mapped reactant and product. """ def __init__( self, reactant: MappedMolecule, product: MappedMolecule, charge: int = 0, spin: int = 1, ): if not isinstance(reactant, MappedMolecule) and isinstance( product, MappedMolecule ): raise ReactionException("reactant and product has to be MappedMolecules!") self.reactant = reactant self.product = product self.charge = charge self.spin = spin self._path_search_calculator = None self._ts_path_energies = [] self._ts_path_coordinates = [] self._ts_energies = [] self._ts_coordinates = [] self.ts_check = None self._reaction_hash = None def __eq__(self, other): """ """ if not type(self) is type(other): raise TypeError(f"Comparing {self.__class__} to {other.__class__}") return self.__hash__() == other.__hash__() def __hash__(self): if self._reaction_hash is None: m = hashlib.blake2b() m.update(Chem.MolToSmiles(self.reactant.rd_mol).encode("utf-8")) m.update(Chem.MolToSmiles(self.product.rd_mol).encode("utf-8")) self._reaction_hash = int(str(int(m.hexdigest(), 16))[:32]) return self._reaction_hash @property def reaction_label(self) -> str: return self.__class__.__name__ + "_" + str(self.__hash__()) @property def path_search_calculator(self): return self._path_search_calculator @path_search_calculator.setter def path_search_calculator(self, calc): self._path_search_calculator = copy.deepcopy(calc) @property def ts_guess_energies(self): """ """ ts_guess_energies = [] for energies in self._ts_path_energies: if energies is None: continue ts_guess_energies.append(energies.max()) return np.array(ts_guess_energies) @property def ts_guess_coordinates(self): """ """ ts_guess_coordinates = [] for energies, coords in zip(self._ts_path_energies, self._ts_path_coordinates): if energies is None: continue ts_guess_coordinates.append(coords[energies.argmax()]) return np.asarray(ts_guess_coordinates) @property def rd_reaction(self): """ """ rd_reaction = rdChemReactions.ChemicalReaction() rd_reaction.AddReactantTemplate(self.reactant.rd_mol) rd_reaction.AddProductTemplate(self.product.rd_mol) return rd_reaction def run_path_search(self, seed=42): # TODO make it possible to take list og coords and energies """ """ if self._path_search_calculator is None: raise ReactionException("Set the path search calculator") self._ts_path_energies, self._ts_path_coordinates = self.path_search_calculator(self, seed=seed) def _run_ts_search(self, ts_calculator=None): """ Run a transition state search using Gaussian. 
""" if len(self.ts_guess_coordinates) == 0: raise ReactionException("Run a path search before TS search.") if ts_calculator is None: raise RuntimeError("Needs a G16 calculator!") if "structure" not in ts_calculator._properties: print('added "structure" to properties') ts_calculator._properties += ["structure"] if "frequencies" not in ts_calculator._properties: print('added "frequencies" to properties') ts_calculator._properties += ["frequencies"] for coords in self.ts_guess_coordinates: ts_results = ts_calculator( self.reactant.atom_symbols, coords, label=self.reaction_label ) if ts_results["converged"] is True: img_frequencis = [freq for freq in ts_results["frequencies"] if freq < 0.0] if len(img_frequencis) == 1: self._ts_coordinates.append(ts_results['structure']) self._ts_energies.append(ts_results['energy']) else: print("not one img. frequency.") self._ts_coordinates = np.array(self._ts_coordinates) self._ts_energies = np.array(self._ts_energies) def _run_irc(self, irc_calculator = None, refine_calculator = None): """ You do not need to add forward/reverse. This is done automatically. TODO: perform reverse and forward in parallel. """ if "structure" in irc_calculator._properties: print('replacing "structure" in properties with "irc_structure"') structure_idx = irc_calculator._properties.index("structure") irc_calculator._properties += ["irc_structure"] del irc_calculator._properties[structure_idx] all_irc_results = [] for ts_coords in self._ts_coordinates: irc_results = dict() for rev_or_fw in ["reverse", "forward"]: tmp_kwds = [kwd.strip() for kwd in irc_calculator._kwds.split(",")] if "reverse" in tmp_kwds: del tmp_kwds[tmp_kwds.index("reverse")] elif "forward" in tmp_kwds: del tmp_kwds[tmp_kwds.index("forward")] tmp_kwds.insert(-1, rev_or_fw) irc_calculator._kwds = ", ".join(tmp_kwds) results = irc_calculator( self.reactant.atom_symbols, ts_coords, label=f"{self.reaction_label}_{rev_or_fw}" ) results = dict([ itm for itm in results.items() if itm[0] in ['converged', 'irc_structure'] ]) irc_results[rev_or_fw] = results all_irc_results.append(irc_results) # Refine IRC endpoint with refine calculator. 
if refine_calculator is not None: # Loop over each IRC for irc_result in all_irc_results: # Loop over each IRC for rev_or_fwd, results in irc_result.items(): if not results['converged']: continue refine_results = refine_calculator( self.reactant.atom_symbols, results["irc_structure"], label=f"{self.reaction_label}_refine_irc_{rev_or_fwd}", ) results['irc_structure'] = refine_results['structure'] return all_irc_results def irc_check_ts(self, ts_calculator, irc_calculator, refine_calculator): """ """ # Run TS optimization self._run_ts_search(ts_calculator) # Run IRC irc_endpoints_results = self._run_irc(irc_calculator, refine_calculator) ts_ok = [] for irc_enpoints in irc_endpoints_results: found_ends = {"reactant": False, "product": False} for results in irc_enpoints.values(): endpint_ac = coords_to_AC(self.reactant.atom_symbols, results["irc_structure"]) found_reactant = np.array_equal(self.reactant.ac_matrix, endpint_ac) found_product = np.array_equal(self.product.ac_matrix, endpint_ac) if found_reactant: found_ends["reactant"] = True elif found_product: found_ends["product"] = True ts_ok.append(found_ends) self.ts_check = ts_ok def write_ts(self): """ Write xyz file for all TS's """ symbols = [atom.GetSymbol() for atom in self.reactant.rd_mol.GetAtoms()] for i, (energy, coords) in enumerate(zip(self._ts_energies, self._ts_coordinates)): name = f"{self.reaction_label}_ts{i}" xyz = f"{len(symbols)}\n {name}: {energy:.5f} Hartree \n" for symbol, coord in zip(symbols, coords): xyz += f"{symbol} " + " ".join(map(str, coord)) + "\n" with open(name + ".xyz", 'w') as xyzfile: xyzfile.write(xyz) def write_ts_guess(self): """ """ symbols = [atom.GetSymbol() for atom in self.reactant.rd_mol.GetAtoms()] for i, (energy, coords) in enumerate(zip(self.ts_guess_energies, self.ts_guess_coordinates)): name = f"{self.reaction_label}_tsguess{i}" xyz = f"{len(symbols)}\n {name}: {energy:.5f} Hartree \n" for symbol, coord in zip(symbols, coords): xyz += f"{symbol} " + " ".join(map(str, coord)) + "\n" with open(name + ".xyz", 'w') as xyzfile: xyzfile.write(xyz) ``` #### File: elementary_step_om/external_calculation/xtb_calculations.py ```python import os import shutil import textwrap import numpy as np from rdkit import Chem from .calculator import Calculator, CalculatorError from elementary_step_om.xyz2mol_local import xyz2AC_vdW from elementary_step_om.io import io_xtb from elementary_step_om.chem import Reaction def get_xtb_version(xtb_path): """ """ output, _ = Calculator.run_cmd(f"{xtb_path} --version") for line in output.split('\n'): if "version" in line.lower(): return line.split()[2] class xTBCalculator(Calculator): allowd_properties = ["energy", "structure"] def __init__( self, xtb_kwds: str = "--opt loose", charge: int = 0, spin: int = 1, properties: list = ["energy", "structure"], nprocs: int = 1, overwrite: bool = True, location: str = ".", ): # Check that the property reader is implemeted. for property in properties: if property not in self.allowd_properties: raise CalculatorError("Property not implemented.") self._charge = charge self._spin = spin self._properties = properties self._xtb_kwds = xtb_kwds super().__init__(nprocs=nprocs, location=location, overwrite=overwrite) self._setup_xtb_enviroment() def _setup_xtb_enviroment(self) -> None: """ """ os.environ["OMP_NUM_THREADS"] = str(self._nprocs) os.environ["MKL_NUM_THREADS"] = str(self._nprocs) if "XTB_CMD" not in os.environ: raise CalculatorError('No XTB_CMD command. 
export XTB_CMD="path_to_xtb"') self._xTB_CMD = os.environ["XTB_CMD"] # takes path from XTB_CMD def _make_cmd(self, input_filename: str) -> None: """ """ xtb_cmd = f"{self._xTB_CMD} {input_filename} {self._xtb_kwds} --norestart" xtb_cmd += f" --chrg {self._charge} --uhf {self._spin - 1}" return xtb_cmd def _write_input(self, atoms, coords, namespace) -> str: """ """ if len(atoms) != len(coords): raise CalculatorError("Length of atoms and coords doesn't match.") input_filename = namespace + ".xyz" with open(input_filename, "w") as inputfile: inputfile.write(f"{len(atoms)} \n \n") for atom, coord in zip(atoms, coords): inputfile.write(f"{atom} {coord[0]} {coord[1]} {coord[2]} \n") return input_filename def _clean(self, working_dir: str) -> None: """ """ self._remove_working_dir(working_dir) def __call__(self, atoms, coords, label): """ """ results = {} working_dir = self._make_working_directory(namespace=label) input_filename = self._write_input(atoms, coords, label) cmd = self._make_cmd(input_filename) output, errmsg = Calculator.run_cmd(cmd) # Extract properties. if "normal termination" in errmsg: results["normal_termination"] = True for property in self._properties + ["converged"]: property_value = io_xtb.read_xtb_out(output, property=property) # Something wrong with output.. if property_value is None: results = {} results["normal_termination"] = False break results[property] = property_value else: results["normal_termination"] = False self._clean(working_dir) return results class xTBPathSearch: """ """ def __init__( self, xtb_kwds: str = "", nruns: int = 3, overwrite: bool = True, nprocs: int = 1, memory: int = 2, location: str = ".", ): self._xtb_kwds = xtb_kwds self._nruns = nruns self._overwrite = overwrite self._nprocs = nprocs self._memory = memory self._location = location self._root_dir = os.getcwd() self._setup_xtb_enviroment() def _setup_xtb_enviroment(self) -> None: """ """ os.environ["OMP_STACKSIZE"] = str(self._memory) + "G" os.environ["OMP_NUM_THREADS"] = str(self._nprocs) os.environ["MKL_NUM_THREADS"] = str(self._nprocs) if "XTB_CMD" not in os.environ: raise CalculatorError('No XTB_CMD command. export XTB_CMD="path_to_xtb"') self._xTB_CMD = os.environ["XTB_CMD"] # check xTB version. if get_xtb_version(self._xTB_CMD) != "6.1.4": raise CalculatorError("Path search can only be used with v. 6.1.x") @property def reaction(self): return self._reaction @reaction.setter def reaction(self, reaction): self._reactant = reaction.reactant self._product = reaction.product self._charge = reaction.charge self._spin = reaction.spin self._reaction_label = reaction.reaction_label def _write_input(self, kpush, kpull, alpha, temp, forward_reaction=True): """ Write "ractant" and "product" files. If forward_reaction = True the calculation is run from reactant -> product, if false it is performed from product -> reactant. 
""" self._reactant_fname = self._reaction_label + "_r.xyz" self._product_fname = self._reaction_label + "_p.xyz" if forward_reaction: self._from_file = self._reactant_fname self._to_file = self._product_fname else: self._from_file = self._product_fname self._to_file = self._reactant_fname path_file = """\ $path nrun=1 nopt=100 anopt=3 kpush={0} kpull={1} alp={2} product={4} $end $scc temp={3} $end $opt optlevel=2 $end """.format( kpush, kpull, alpha, temp, self._to_file ) with open("path.inp", "w") as ofile: ofile.write(textwrap.dedent(path_file)) self._reactant.conformers[0].write_xyz(filename=self._reactant_fname) self._product.conformers[0].write_xyz(filename=self._product_fname) def _make_cmd(self): """ """ cmd = f"{self._xTB_CMD} {self._from_file} --path --input path.inp --gfn2 --norestart " cmd += f"--chrg {self._charge} --uhf {self._spin - 1} " cmd += self._xtb_kwds return cmd def _run_xtb_path(self, iter_num, kpush, kpull, alpha, temp, forward_reaction=True): """ Run the xTB path search with the given parameters. return the xTB output. """ os.makedirs(f"run{iter_num}") os.chdir(f"run{iter_num}") self._write_input(kpush, kpull, alpha, temp, forward_reaction=forward_reaction) cmd = self._make_cmd() output, stderr = Calculator.run_cmd(cmd) with open("xtbout.log", 'w') as f: f.write(output) os.chdir("..") return output def _get_single_point_energies(self, path_coords): """ Compute single point energies for each point in 'path_coords' """ xtb_calc = xTBCalculator( xtb_kwds="--sp", charge=self._charge, spin=self._spin, properties=["energy"] ) atom_symbols = self._reactant.conformers[0].atom_symbols sp_energies = np.empty(path_coords.shape[0]) for point_idx, path_point_coord in enumerate(path_coords): results = xtb_calc(atom_symbols, path_point_coord, f"point{point_idx}") sp_energies[point_idx] = results["energy"] return sp_energies * 627.503 def _interpolate_ts(self, path_coords, npoints=20): """ """ natoms = path_coords.shape[1] energies = self._get_single_point_energies(path_coords) max_energy_idx = energies.argmax() difference_mat = ( path_coords[max_energy_idx - 1] - path_coords[max_energy_idx + 1] ) interpolated_path_coords = np.zeros((npoints, natoms, 3)) for j in range(npoints + 1): interpolated_path_coords[j - 1] = ( path_coords[max_energy_idx + 1] + j / npoints * difference_mat ) interpolated_energies = self._get_single_point_energies( interpolated_path_coords ) return interpolated_energies, interpolated_path_coords def _is_reaction_complete(self, output): """ Checks that the path have an RMSD below 0.5 AA.""" # TODO Not super efficient, reads all lines in file. output_lines = output.split("\n") for line in output_lines: if "run 1 barrier" in line: try: rmsd = float(line.split()[-1]) except: rmsd = 9999.9 # rmsd above 0.5 reaction not complete. 
print(line) if rmsd < 0.5: return True return False def _read_path(self, relative_path="."): """ Read coordinates and energies from the path search """ path_filename = os.path.join(relative_path, "xtbpath_1.xyz") with open(path_filename, "r") as path_file: xtbpath = path_file.read() path_xyz_blocks = xtbpath.split("SCF done") natoms = int(path_xyz_blocks[0]) del path_xyz_blocks[0] path_coords = np.zeros((len(path_xyz_blocks), natoms, 3)) relative_energies = np.zeros(len(path_xyz_blocks)) for structure_idx, path_strucrure in enumerate(path_xyz_blocks): xyz_data = path_strucrure.split("\n") relative_energies[structure_idx] = float(xyz_data[0]) del xyz_data[0] coords = np.zeros((natoms, 3)) for j in range(natoms): atom_coord = [coord for coord in xyz_data[j].split()][1:] coords[j] = np.array(atom_coord).astype(float) path_coords[structure_idx] = coords return relative_energies, path_coords def _is_reac_prod_identical(self, path_coords): """ This function ensures that if RMSD is above 0.5AA, and something happend in the last iteration - it is probably an intermediate. """ pt = Chem.GetPeriodicTable() atom_nums = [ pt.GetAtomicNumber(atom) for atom in self._reactant.conformers[0].atom_symbols ] reactant_ac, _ = xyz2AC_vdW(atom_nums, path_coords[0]) product_ac, _ = xyz2AC_vdW(atom_nums, path_coords[-1]) if np.array_equal(reactant_ac, product_ac): # Nothing happend - reac = prod return True return False def _find_xtb_path(self, temp=300): """""" kpull_list = [-0.02, -0.02, -0.02, -0.03, -0.03, -0.04, -0.04] alp_list = [ 0.6 , 0.3 , 0.3 , 0.6 , 0.6 , 0.6 , 0.4] def run_param_set(kpush, kpull, alpha, run_num, reac_direction): """""" os.makedirs(f"param_set{run_num}") os.chdir(f"param_set{run_num}") for iter_num in range(3): kpush = round(kpush, 4) kpull = round(kpull, 4) output = self._run_xtb_path( iter_num, kpush, kpull, alpha, temp, forward_reaction=reac_direction ) if self._is_reaction_complete(output): print(f">> Found a xTB path going forward: {reac_direction}") print(f" * kpush: {kpush:.4f}") print(f" * kpull: {kpull:.4f}") print(f" * alpha: {alpha}") print(f" * temp: {temp}") # Read output path, and energies. final_energies, final_path_coords = self._read_path( relative_path=f"run{iter_num}" ) return True, final_energies, final_path_coords kpush *= float(1.5) kpull *= float(1.5) # if nothing happens, return the path for the first micro run. energies, path_coords = self._read_path(relative_path="run0") os.chdir("..") return False, energies, path_coords direction_forward = True i = 0 for param_set_idx, (kpull, alpha) in enumerate(zip(kpull_list, alp_list)): if param_set_idx == 0: kpush = 0.008 else: kpush = 0.01 found_path, energies, coords = run_param_set( kpush, kpull, alpha, param_set_idx, direction_forward ) if found_path: return True, energies, coords # if the search was unsucessfull, check if the reactant and product changed. # If they didn't update reactant and product structures. if self._is_reac_prod_identical(coords): if direction_forward: self._reactant.conformers[0].coordinates = coords[-1] else: self._product.conformers[0].coordinates = coords[-1] else: print() text = ( ">> Something happend but RMSD not bellow 0.5 RMSD. " "Most likely not a one step reaction." 
) print(text) return "found intermediate", None, None # Change direction i += 1 if i % 2 == 0: direction_forward = True else: direction_forward = False return "increase temp", None, None def _run_barrer_scan(self): """ """ npoints_iterpolate = 20 return_msg_temp300, _, path_coords = self._find_xtb_path(temp=300) if return_msg_temp300 is True: ts_energy, ts_coords = self._interpolate_ts(path_coords, npoints=npoints_iterpolate) return ts_energy, ts_coords elif return_msg_temp300 == "found intermediate": return None, None elif return_msg_temp300 == "increase temp": print("Didn't find a path. Increasing the temperature to 6000 K.") os.makedirs('tmp6000') os.chdir('tmp6000') return_msg_temp6000, _, path_coords = self._find_xtb_path(temp=6000) print("return code: ", return_msg_temp6000) if return_msg_temp6000 is True: ts_energy, ts_coords = self._interpolate_ts(path_coords, npoints=20) return ts_energy, ts_coords else: return None, None def _make_root_working_directory(self): """ Make the directory the path-search is working in """ self._root_workind_dir = os.path.join(self._location, self._reaction_label) if os.path.isdir(self._root_workind_dir) and self._overwrite: shutil.rmtree(self._root_workind_dir) elif os.path.isdir(self._root_workind_dir) and not self._overwrite: raise CalculatorError("Root directory exists.") os.makedirs(self._root_workind_dir) os.chdir(self._root_workind_dir) def __call__(self, reaction: Reaction, seed: int = 42, embed_refine_calc = None): """ """ self.reaction = reaction self._make_root_working_directory() # Embeding with random seed -> small difference in starting geometries. np.random.seed(seed=seed) random_seeds = np.random.randint(1, 1000, self._nruns, dtype=int) ts_energy, ts_coords = [], [] for i in range(self._nruns): self._reactant.conformers = [] self._reactant.embed_molecule( confs_pr_frag=1, refine_calculator=embed_refine_calc, direction=[0.8, 0, 0], seed=random_seeds[i] ) self._product.conformers = [] self._product.embed_molecule( confs_pr_frag=1, refine_calculator=embed_refine_calc, direction=[0.8, 0., 0.], seed=random_seeds[i] ) os.makedirs(f"pathrun{i}") os.chdir(f"pathrun{i}") energy, coords = self._run_barrer_scan() ts_energy.append(energy) ts_coords.append(coords) os.chdir(self._root_dir) os.chdir(self._root_workind_dir) os.chdir(self._root_dir) return ts_energy, ts_coords ``` #### File: elementary_step_om/elementary_step_om/utils.py ```python import networkx as nx from itertools import combinations import hashlib from rdkit import Chem from rdkit.Chem.rdchem import ChiralType, BondStereo def is_equal(frag1, frag2): if frag1.HasSubstructMatch(frag2) and frag2.HasSubstructMatch(frag1): return True return False def equivalent_neighbors(atom, useChirality=True): """ Breaks bonds on 'atom', and compares the resulting fragments. If two fragments are identical, the atom isn't chiral. I don't think i want to use chirality when comparing the fragments. Are the fragment different when it only differentiates in the chirality? 
""" mol = atom.GetOwningMol() fragments = [] for nei in atom.GetNeighbors(): bond_to_break = mol.GetBondBetweenAtoms(atom.GetIdx(), nei.GetIdx()).GetIdx() new_mol = bond = Chem.FragmentOnBonds( mol, [bond_to_break], dummyLabels=[(0, 0)] ) new_frags = Chem.GetMolFrags(new_mol, asMols=True, sanitizeFrags=False) fragments += new_frags if len(new_frags) > 1: if is_equal(new_frags[0], new_frags[1]): return False for frag1, frag2 in combinations(fragments, 2): if is_equal(frag1, frag2): return True return False def double_bond_pseudochiral(bond): """ Checks if the double bond is pseudo chiral. """ begin_atom = bond.GetBeginAtom() end_atom = bond.GetEndAtom() if equivalent_neighbors(begin_atom) or equivalent_neighbors(end_atom): return True return False def make_graph_hash(mol, use_atom_maps=False): """ As is now. It doesn't recognise symmetric rings of i.e. the type: '[H]C([O-])=C([O-])OOC(=C1OO1)C([H])([H])[H]' as two identical neighbours, and are therefore chiral. The function return the hash(weisfeiler_lehman_graph_hash). That is, the hash of the hash. The __hash__ dundur needs a integer which you get from the hash() functions. The hash is truncated to 16 digits. Is that smart? """ ac_matrix = Chem.GetAdjacencyMatrix(mol) graph = nx.from_numpy_matrix(ac_matrix) # node labels for node_label in graph.nodes(): atom = mol.GetAtomWithIdx(node_label) atom_symbol = atom.GetSymbol() # Chirality on atoms rdkit_chiral_tag = atom.GetChiralTag() if rdkit_chiral_tag == ChiralType.CHI_UNSPECIFIED: chiral_tag = "" else: if equivalent_neighbors(atom) and not use_atom_maps: # TODO is this ok? chiral_tag = "" elif rdkit_chiral_tag == ChiralType.CHI_TETRAHEDRAL_CW: chiral_tag = "@" elif rdkit_chiral_tag == ChiralType.CHI_TETRAHEDRAL_CCW: chiral_tag = "@@" # Atomic charges formal_atomic_charge = str(atom.GetFormalCharge()) atom_identifier = atom_symbol + chiral_tag + formal_atomic_charge if use_atom_maps: atom_identifier += str(atom.GetAtomMapNum()) graph.nodes[node_label]["atom_identifier"] = atom_identifier # Edge labels for edge_labels in graph.edges(): bond = mol.GetBondBetweenAtoms(*edge_labels) bond_type = bond.GetSmarts(allBondsExplicit=True) # This or use bond type rdkit_bond_stereo = bond.GetStereo() if rdkit_bond_stereo == BondStereo.STEREONONE: bond_stereo = "" else: if double_bond_pseudochiral(bond) and not use_atom_maps: #TODO is this OK? bond_stereo = "" elif rdkit_bond_stereo in [BondStereo.STEREOCIS, BondStereo.STEREOZ]: bond_stereo = "\/" elif rdkit_bond_stereo in [BondStereo.STEREOTRANS, BondStereo.STEREOE]: bond_stereo = "//" bond_identifier = bond_type + bond_stereo graph.edges[edge_labels]["bond_identifier"] = bond_identifier nx_hash_hex = nx.weisfeiler_lehman_graph_hash(graph, node_attr="atom_identifier", edge_attr="bond_identifier") m = hashlib.sha256() m.update(nx_hash_hex.encode('utf-8')) return int(str(int(m.hexdigest(), 16))[:32]) ``` #### File: elementary_step_om/tests/tests_molecule.py ```python import unittest from rdkit import Chem from elementary_step_om.compound import MoleculeException, Molecule class TestMolecule(unittest.TestCase): def setUp(self): with open("./tests/fixtures/mapped_molfile.mol", 'r') as mapped_molblock: self.molblock_mapped = mapped_molblock.read() with open("./tests/fixtures/unmapped_molfile.mol", 'r') as unmapped_molblock: self.molblock_unmapped = unmapped_molblock.read() def test_no_input(self): self.assertRaises(MoleculeException, Molecule, ) def test_init_with_mapped_molblock(self): mol = Molecule(moltxt=self.molblock_mapped) # Atom mapping is correct. 
self.assertEqual( [atom.GetAtomMapNum() for atom in mol.rd_mol.GetAtoms()], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] ) # atom_mapped key is set self.assertTrue(mol.atom_mapped) def test_init_with_unmapped_molblock(self): mol = Molecule(moltxt=self.molblock_unmapped) # Atom mapping is correct. self.assertEqual( [atom.GetAtomMapNum() for atom in mol.rd_mol.GetAtoms()], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) # atom_mapped key is set self.assertFalse(mol.atom_mapped) def test_mapped_hash(self): mol = Molecule(moltxt=self.molblock_mapped) self.assertEqual(hash(mol), 1588624637251409299) def test_unmapped_hash(self): mol = Molecule(moltxt=self.molblock_unmapped) self.assertEqual(hash(mol), 2073239415288407608) def test_make_canonical(self): mol = Molecule(moltxt=self.molblock_mapped) mol.make_canonical() self.assertEqual(hash(mol), 2073239415288407608) # unmapped hash self.assertFalse(mol.atom_mapped) if __name__ == "__main__": unittest.main() ```
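The calculator classes in the record above are only exercised indirectly by the tests. As a rough illustration of how the pieces fit together, here is a minimal usage sketch of `xTBCalculator` from `xtb_calculations.py`. It is an assumption-laden example, not part of the repository: it assumes the package is importable under the path shown in the file header, that an `xtb` binary is installed and exported through the `XTB_CMD` environment variable, and the water geometry is made up for the example.

```python
# Hypothetical usage sketch of xTBCalculator (assumes a local xtb install and
# that the package is importable under this path).
import os

from elementary_step_om.external_calculation.xtb_calculations import xTBCalculator

os.environ["XTB_CMD"] = "/path/to/xtb"  # placeholder path - point this at your xtb binary

# Geometry optimisation of a water molecule (coordinates are illustrative only).
atoms = ["O", "H", "H"]
coords = [[0.000, 0.000, 0.117],
          [0.000, 0.757, -0.467],
          [0.000, -0.757, -0.467]]

calc = xTBCalculator(xtb_kwds="--opt loose", charge=0, spin=1,
                     properties=["energy", "structure"], nprocs=1)
results = calc(atoms, coords, label="water_opt")

if results["normal_termination"]:
    # Judging by the 627.503 conversion used in _get_single_point_energies,
    # the parsed energy is in Hartree.
    print("xTB energy:", results["energy"])
    print("optimised structure:", results["structure"])
else:
    print("xTB did not terminate normally")
```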
{ "source": "jensengroup/fragreact", "score": 3 }
#### File: jensengroup/fragreact/cbh.py ```python import numpy as np import re from rdkit import Chem from rdkit.Chem import rdMolDescriptors from itertools import combinations import copy def print_smiles(smiles_list, human=False): smiles_dict = count_smiles(smiles_list) keys = smiles_dict.keys() keys.sort() out = [] for key in keys: out += [str(smiles_dict[key]) + " " + key] return " ".join(out) def print_reaction(reactants, products, human=False): if not human: reaction = ">>".join([".".join(reactants), ".".join(products)]) else: reactants = print_smiles(reactants) products = print_smiles(products) reaction = reactants+ ">>"+ products return reaction def canonical(smiles): """ SMILES provided is canonical, so the output should be the same no matter how a particular molecule is input """ m = Chem.MolFromSmiles(smiles) smiles = Chem.MolToSmiles(m) return smiles def kekulize(smiles): m = Chem.MolFromSmiles(smiles) Chem.Kekulize(m) smiles = Chem.MolToSmiles(m, kekuleSmiles=True) return smiles def count_hydrogens(smiles): """ """ m = Chem.MolFromSmiles(smiles) n_hydrogen = 0 for a in m.GetAtoms(): n_hydrogen += a.GetTotalNumHs() # print a.GetAtomicNum() # print a.GetTotalNumHs() # print a.GetNumExplicitHs() # print a.GetNumImplicitHs() return n_hydrogen def count_smiles(smiles_list): """ Count SMILES by creating a dictionary with SMILES as keys, point to the number of that particular SMILES. e.i. dict[smiles] = # of smiles """ smiles_dict = {} components, components_count = np.unique(smiles_list, return_counts=True) for comp, count in zip(components, components_count): smiles_dict[comp] = count return smiles_dict def substract_smiles(A, B): """ A - B = Cp + Cn where Cp has positive results and Cn has negative results """ if isinstance(A, str): A = A.split(".") if isinstance(B, str): B = B.split(".") Cp = [] Cn = [] A = count_smiles(A) B = count_smiles(B) for key in np.unique(list(A.keys()) + list(B.keys())): if key not in A: Cn += [key] * B[key] continue if key not in B: Cp += [key] * A[key] continue diff = A[key] - B[key] if diff == 0: continue elif diff > 0: Cp += [key]*diff elif diff < 0: Cn += [key]*abs(diff) return Cp, Cn def tuning(left_side, right_side): corrected_left = [] corrected_right = [] left_side = count_smiles(left_side) right_side = count_smiles(right_side) for key in np.unique(list(left_side.keys()) + list(right_side.keys())): if key not in left_side: print("hello") quit() if key not in right_side: print("hello2") quit() diff = right_side[key] - left_side[key] if diff == 0: continue elif diff > 0: corrected_left += [key] * diff elif diff < 0: corrected_right += [key] * diff return corrected_left, corrected_right def get_bond_type(m, a, b): # NOTE # If m is not kekulized then bonds can be AROMATIC # which is a problem for the component schemes try: bond_type = str(m.GetBondBetweenAtoms(a, b).GetBondType()) except AttributeError: return False if bond_type == "SINGLE": bond = "" elif bond_type == "DOUBLE": bond = "=" elif bond_type == "TRIPLE": bond = "#" else: bond = False return bond def get_atoms(smiles, ignore_hydrogen=True): smiles = kekulize(smiles) p = re.compile(r"[A-Z][a-z]?") atoms = p.findall(smiles) if ignore_hydrogen: atoms = [atom for atom in atoms if atom != "H"] return atoms def add_neighbours(mol, substructures): substructures = list(substructures) for j, idx in enumerate(substructures): for i in idx: A = mol.GetAtomWithIdx(i) for B in A.GetNeighbors(): k = B.GetIdx() substructures[j] += (k,) return substructures def get_components_neighbors(mol, atoms): atoms 
= list(atoms) for idx in atoms: idx, = idx A = mol.GetAtomWithIdx(idx) for B in A.GetNeighbors(): idx_b = B.GetIdx() atom = B.GetAtomicNum() charge = B.GetFormalCharge() bond = Chem.GetBondBetweenAtoms(mol, idx, idx_b) return def get_components(smiles, smart, kekulize=True, add=False): m = Chem.MolFromSmiles(smiles) smart = Chem.MolFromSmarts(smart) if kekulize: Chem.Kekulize(m) substructures = m.GetSubstructMatches(smart) components = [] if add: substructures = add_neighbours(m, substructures) for sub in substructures: if add: m_new = copy.copy(m) m_new = Chem.RWMol(m_new) for B, C in combinations(sub[1:], 2): m_new.RemoveBond(B, C) else: m_new = m component = Chem.MolFragmentToSmiles(m_new, atomsToUse=sub, isomericSmiles=True, kekuleSmiles=True, canonical=True) A = m.GetAtomWithIdx(sub[0]) mc = Chem.MolFromSmiles(component) n_atoms = mc.GetNumAtoms() n_bonds = len(mc.GetBonds()) component = Chem.MolToSmiles(mc) if "+" in component or "-" in component or "H" in component: # Very awful hack to fix the charged molecules and their explicit # hydrogens charges = np.zeros(n_atoms, dtype=int) for idx in range(n_atoms): atom = mc.GetAtomWithIdx(idx) atom.SetNumExplicitHs(0) charge = atom.GetFormalCharge() charges[idx] = charge atom.SetFormalCharge(0) component = Chem.MolToSmiles(mc, canonical=False) component = component.replace("[", "").replace("]","") mc = Chem.MolFromSmiles(component) for idx, charge in zip(range(n_atoms), charges): atom = mc.GetAtomWithIdx(idx) charge = int(charge) atom.SetFormalCharge(charge) component = Chem.MolToSmiles(mc) if n_atoms <= n_bonds: mw = Chem.RWMol(m) if len(sub) == 3: mw.RemoveBond(sub[0], sub[-1]) elif len(sub) == 4 or len(sub) == 5: for i in range(0, n_atoms): for j in range(i+1, n_atoms): if i == 1 or j == 1: continue mw.RemoveBond(sub[i], sub[j]) component = Chem.MolFragmentToSmiles(mw, atomsToUse=sub, isomericSmiles=True, kekuleSmiles=True, canonical=True) if "1" in component: quit("Error connectivity") else: component = Chem.MolToSmiles(mc) # charge = Chem.GetFormalCharge(mc) # # if not charge == 0: # # NOTE # # Lots of lots of if case down this road # # n_atoms = mc.GetNumAtoms() # # for i in range(n_atoms): # # atom = mc.GetAtomWithIdx(i) # charge = atom.GetFormalCharge() # # if not charge == 0: # atom.SetFormalCharge(0) component = canonical(component) components += [component] return components def get_components_scheme1(smiles, kekulize=True): c1 = "[*]~[*]" if "+" in smiles or "-" in smiles: pass else: return get_components(smiles, c1) # The code below doesn't get charges return get_components(smiles, c1) c1 = Chem.MolFromSmarts(c1) m = Chem.MolFromSmiles(smiles) if kekulize: Chem.Kekulize(m) substructures = m.GetSubstructMatches(c1) components = [] for sub in substructures: a, b = sub ab = get_bond_type(m, a, b) a = m.GetAtomWithIdx(a).GetSymbol() b = m.GetAtomWithIdx(b).GetSymbol() component = a + ab + b components.append(component) components = [canonical(component) for component in components] return components def get_components_scheme2(smiles, kekulize=True): c1 = "[D2]" c2 = "[*]~[D2]~[*]" c3 = "[*]~[D3](~[*])~[*]" c4 = "[*]~[*](~[*])(~[*])~[*]" # if "+" in smiles or "-" in smiles: # pass # else: components = [] components += get_components(smiles, c1, add=True) # components += get_components(smiles, c2) components += get_components(smiles, c3) components += get_components(smiles, c4) return components c2 = Chem.MolFromSmarts(c2) c3 = Chem.MolFromSmarts(c3) c4 = Chem.MolFromSmarts(c4) m = Chem.MolFromSmiles(smiles) if kekulize: 
Chem.Kekulize(m) substructures = m.GetSubstructMatches(c2) components = [] for sub in substructures: a, b, c = sub ab = get_bond_type(m, a, b) bc = get_bond_type(m, b, c) a = m.GetAtomWithIdx(a).GetSymbol() b = m.GetAtomWithIdx(b).GetSymbol() c = m.GetAtomWithIdx(c).GetSymbol() component = a + ab + b + bc + c components.append(component) substructures = m.GetSubstructMatches(c3) for sub in substructures: a, b, c, d = sub ab = get_bond_type(m, a, b) bc = get_bond_type(m, b, c) bd = get_bond_type(m, b, d) a = m.GetAtomWithIdx(a).GetSymbol() b = m.GetAtomWithIdx(b).GetSymbol() c = m.GetAtomWithIdx(c).GetSymbol() d = m.GetAtomWithIdx(d).GetSymbol() component = a + ab + b + "(" + bc + c + ")" + bd + d components.append(component) substructures = m.GetSubstructMatches(c4) for sub in substructures: a, b, c, d, e = sub ab = get_bond_type(m, a, b) bc = get_bond_type(m, b, c) bd = get_bond_type(m, b, d) be = get_bond_type(m, b, e) a = m.GetAtomWithIdx(a).GetSymbol() b = m.GetAtomWithIdx(b).GetSymbol() c = m.GetAtomWithIdx(c).GetSymbol() d = m.GetAtomWithIdx(d).GetSymbol() e = m.GetAtomWithIdx(e).GetSymbol() component = a + ab + b component += "(" + bc + c + ")" component += "(" + bd + d + ")" component += be + e components.append(component) components = [canonical(component) for component in components] return components def decompontent(smiles, scheme=1): if scheme == 1: decompontent_scheme = decompontent_scheme1 elif scheme == 2: decompontent_scheme = decompontent_scheme2 left, right = decompontent_scheme(smiles) return left, right def decompontent_scheme1(smiles): """ Tune the equation A (bb) => aa where A (target) is big smiles aa (scheme1 components) is scheme2 components bb (atoms) is additional bonds required, to have equald bonds on each side this is done for each A which consists of len(aa) > 0 """ components = get_components_scheme1(smiles) if len(components) == 0: return [], [] bonds_leftside = get_atoms(smiles) bonds_rightside = [] for component in components: bonds_rightside += get_atoms(component) left, right = tuning(bonds_leftside, bonds_rightside) right += components return left, right def decompontent_scheme2(smiles): """ Tune the equation A (bb) => aa where A (target) is big smiles aa (scheme2 components) is scheme2 components bb (single bonds) is additional bonds required, to have equald bonds on each side this is done for each A which consists of len(aa) > 0 """ components = get_components_scheme2(smiles) if len(components) == 0: return [], [] bonds_leftside = get_components_scheme1(smiles) bonds_rightside = [] for component in components: bonds_rightside += get_components_scheme1(component) left, right = tuning(bonds_leftside, bonds_rightside) right += components if not check_atoms([smiles] + left, right): print("Error in fragreact tuneing:", smiles) print([smiles], left, right) quit() return left, right def resultant(reactants, products, scheme=1): """ assummed that smiles lists are both split(".") and canonical at this point """ reactants_leftside = [] reactants_rightside = [] products_leftside = [] products_rightside = [] reactants_missing = [] products_missing = [] if scheme == 1: decompontent_scheme = decompontent_scheme1 elif scheme == 2: decompontent_scheme = decompontent_scheme2 for reactant in reactants: left, right = decompontent_scheme(reactant) if len(left) == 0 and len(right) == 0: reactants_missing += [reactant] reactants_leftside += left reactants_rightside += right for product in products: left, right = decompontent_scheme(product) if len(left) == 0 and 
len(right) == 0: products_missing += [product] products_leftside += left products_rightside += right left_positive, left_negative = substract_smiles(products_leftside, reactants_leftside) right_positive, right_negative = substract_smiles(products_rightside, reactants_rightside) left = left_positive + right_negative + reactants_missing right = right_positive + left_negative + products_missing left, right = substract_smiles(left, right) hydrogens_left = 0 hydrogens_right = 0 for each in left: hydrogens_left += count_hydrogens(each) for each in right: hydrogens_right += count_hydrogens(each) tune_hydrogens = hydrogens_left - hydrogens_right if tune_hydrogens < 0: left += ['[H+]']*abs(tune_hydrogens) if tune_hydrogens > 0: right += ['[H+]']*tune_hydrogens return left, right def split_smiles(smiles, num_sep=None): """ number seperator num_sep (e.g. 3xCC, num_spe="x") """ if type(smiles) == type(""): smiles_list = smiles.split(".") else: smiles_list = smiles for i, smiles in enumerate(smiles_list): smiles = smiles.split(".") if len(smiles) > 1: smiles_list[i] = smiles[0] smiles_list += smiles[1:] if num_sep: for i, smiles in enumerate(smiles_list): if num_sep in smiles: num, smiles = smiles.split(num_sep) num = int(num) smiles_list[i] = smiles smiles_list += [smiles]*(num-1) return smiles_list def cbh_n(reactants, products, scheme, do_canonical=True): """ Use connectivity-based hieracy for reaction (reactants -> products) in: reactants -- list of SMILES products -- list of SMILES scheme -- int level of connecitivty out: left -- list of smiles for the reactant part of the CBHn reaction right -- list of smiles for the product part of the CBHn reaction """ if do_canonical: reactants = [canonical(smiles) for smiles in reactants] products = [canonical(smiles) for smiles in products] left, right = resultant(reactants, products, scheme=scheme) return left, right def check_atoms(reactants, products): """ Check the validity of the reaction. Reaction should have eq. no. of atoms for both reactants and products. 
""" ratoms = [get_atoms(smiles) for smiles in reactants] patoms = [get_atoms(smiles) for smiles in products] # flatten ratoms = sum(ratoms, []) patoms = sum(patoms, []) ratoms.sort() patoms.sort() return ratoms == patoms def check_reaction(reactants, products): """ """ if isinstance(reactants, list): reactants = ".".join(reactants) if isinstance(products, list): products = ".".join(products) reactants = Chem.MolFromSmiles(reactants) products = Chem.MolFromSmiles(products) return rdMolDescriptors.CalcMolFormula(reactants) == rdMolDescriptors.CalcMolFormula(products) ``` #### File: jensengroup/fragreact/shell.py ```python import subprocess as sp def shell(cmd, shell=False): if shell: p = sp.Popen(cmd, shell=True, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE) else: cmd = cmd.split() p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE) output, err = p.communicate() return output ``` #### File: jensengroup/fragreact/test.py ```python import numpy as np import cbh def test_fragmentation_cbh1(): test_smiles = "C1=C[C@H]2C[C@@H]1CC2" assert sorted(cbh.get_components_scheme1(test_smiles)) == sorted("CC C=C CC CC CC CC CC CC".split()) return def test_fragmentation_cbh2(): test_smiles = "C1=C[C@H]2C[C@@H]1CC2" assert sorted(cbh.get_components_scheme2(test_smiles)) == sorted("C=CC C=CC CCC CCC CCC CC(C)C CC(C)C".split()) return def test_fragmentation_reaction_cbh1(): reactants, products = ["C1=CC=CC1", "C=C"], ["C1=C[C@H]2C[C@@H]1CC2"] left, right = cbh.cbh_n(reactants, products, 1) assert sorted(left) == sorted(['C', 'C', 'C', 'C', 'C=C', 'C=C']) assert sorted(right) == sorted(['CC', 'CC', 'CC', 'CC']) return def test_fragmentation_reaction_cbh2(): reactants, products = ["C1=CC=CC1", "C=C"], ["C1=C[C@H]2C[C@@H]1CC2"] left, right = cbh.cbh_n(reactants, products, 2) assert sorted(left) == sorted(['C=CC', 'C=CC', 'CC', 'CC', 'CC', 'CC']) assert sorted(right) == sorted(['CC(C)C', 'CC(C)C', 'CCC', 'CCC']) return def test_split_smiles(): assert cbh.split_smiles("CC.CC") == ["CC", "CC"] assert cbh.split_smiles("2;CC", num_sep=";") == ["CC", "CC"] assert cbh.split_smiles(["CC", "CC.CC"]) == ["CC", "CC", "CC"] return def test_get_components_scheme1(): smiles = "C=[NH+]C" components = ["C=[NH2+]", "C[NH3+]"] output = cbh.get_components_scheme1("C=[NH+]C") assert sorted(components) == sorted(output) smiles = "C#[N+]C" components = ["C#[NH+]", "C[NH3+]"] output = cbh.get_components_scheme1(smiles) assert sorted(components) == sorted(output) smiles = "CC(=O)[O-]" components = ["CC", "C=O", "C[O-]"] output = cbh.get_components_scheme1(smiles) assert sorted(components) == sorted(output) smiles = "C[S+](C)C" components = ['C[SH2+]', 'C[SH2+]', 'C[SH2+]'] output = cbh.get_components_scheme1(smiles) assert sorted(components) == sorted(output) return def test_get_components_scheme2(): fun = cbh.get_components_scheme2 # getting the right number of H on N smiles = "CCc1c[nH]c2ccccc12" components = ['CCC', 'C=CN', 'CNC', 'C=CC', 'C=CC', 'C=CC', 'C=CC', 'C=C(C)C', 'C=C(C)C', 'C=C(C)N'] output = fun(smiles) assert sorted(components) == sorted(output) # connected smiles smiles = "C1CO1" components = ['CCO', 'CCO', 'COC'] output = fun(smiles) assert sorted(components) == sorted(output) return if __name__ == "__main__": print("use python3 -m pytest test.py") ```
{ "source": "jensengroup/GED", "score": 3 }
#### File: jensengroup/GED/GED.py
```python
from rdkit import Chem
import networkx as nx


def get_graph(mol):
    """Convert an RDKit molecule into a labeled NetworkX graph.

    Bond orders become edge weights, and atomic numbers are written onto the
    adjacency-matrix diagonal, so each node carries its element as a self-loop weight.
    """
    Chem.Kekulize(mol)
    atoms = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
    am = Chem.GetAdjacencyMatrix(mol, useBO=True)
    for i, atom in enumerate(atoms):
        am[i, i] = atom
    G = nx.from_numpy_matrix(am)
    return G


mol1 = Chem.MolFromSmiles('c1ccccc1')
# mol2 = Chem.MolFromSmiles('c1cnccc1')
mol2 = Chem.MolFromSmiles('C=CC=CC=C')

G1 = get_graph(mol1)
G2 = get_graph(mol2)

# Graph edit distance between benzene and hexatriene; edges (including the
# atomic-number self-loops) only match when their weights agree.
GED = nx.graph_edit_distance(G1, G2, edge_match=lambda a, b: a['weight'] == b['weight'])

print(GED)
```
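Because `get_graph` encodes the element as a self-loop weight, the `edge_match` on `weight` is what distinguishes, say, carbon from nitrogen. A small variant sketch below stores the element as an explicit node attribute and passes a `node_match` instead; the attribute name `Z` is made up for the example, and the rest mirrors the script above.

```python
# Variant sketch: compare atoms via a node attribute instead of diagonal self-loops.
from rdkit import Chem
import networkx as nx

def get_labeled_graph(smiles):
    mol = Chem.MolFromSmiles(smiles)
    Chem.Kekulize(mol)
    G = nx.from_numpy_matrix(Chem.GetAdjacencyMatrix(mol, useBO=True))
    for atom in mol.GetAtoms():
        G.nodes[atom.GetIdx()]["Z"] = atom.GetAtomicNum()  # 'Z' is an arbitrary label
    return G

G1 = get_labeled_graph("c1ccccc1")
G2 = get_labeled_graph("c1cnccc1")

print(nx.graph_edit_distance(
    G1, G2,
    node_match=lambda a, b: a["Z"] == b["Z"],
    edge_match=lambda a, b: a["weight"] == b["weight"],
))
```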
{ "source": "jensengroup/get_conformations", "score": 2 }
#### File: jensengroup/get_conformations/get_conformations.py ```python from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import rdMolTransforms import itertools import random def get_subset(number_of_conformations,max_number_of_conformations,angle_combinations): subset = [] subset_index = random.sample(range(number_of_conformations), max_number_of_conformations) for index in subset_index: subset.append(angle_combinations[index]) return subset def get_conformations(mol,max_number_of_conformations): raw_rot_bonds = mol.GetSubstructMatches(Chem.MolFromSmarts("[!#1]~[!$(*#*)&!D1]-!@[!$(*#*)&!D1]~[!#1]")) raw_rot_bonds += mol.GetSubstructMatches(Chem.MolFromSmarts("[*]~[*]-[O,S]-[#1]")) raw_rot_bonds += mol.GetSubstructMatches(Chem.MolFromSmarts("[*]~[*]-[NX3;H2]-[#1]")) bonds = [] rot_bonds = [] for k,i,j,l in raw_rot_bonds: if (i,j) not in bonds: bonds.append((i,j)) rot_bonds.append((k,i,j,l)) conf = mol.GetConformer() angles = [] for k,i,j,l in rot_bonds: theta = rdMolTransforms.GetDihedralDeg( conf, k,i,j,l ) angles.append([theta, theta+120., theta-120.]) angle_combinations = list(itertools.product(*angles)) number_of_conformations = len(angle_combinations) if number_of_conformations > max_number_of_conformations: angle_combinations = get_subset(number_of_conformations,max_number_of_conformations,angle_combinations) for angle_combination in angle_combinations: for (k,i,j,l),angle in zip(rot_bonds,angle_combination): rdMolTransforms.SetDihedralDeg( conf, k,i,j,l, angle ) mol.AddConformer(conf,assignId=True) #print mol.GetNumConformers() return mol if __name__ == "__main__": smiles = "C(C)(C)CCN" #smiles = "CCCC" #smiles = "C1(C2=CC=CN2)=NC=CC=C1" mol = Chem.MolFromSmiles(smiles) mol = Chem.AddHs(mol,explicitOnly=False) AllChem.EmbedMolecule(mol) AllChem.UFFOptimizeMolecule(mol) max_conf = 10 mol = get_conformations(mol,max_conf) w = Chem.SDWriter('conformers.sdf') for i,conf in enumerate(mol.GetConformers()): tm = Chem.Mol(mol,False,conf.GetId()) w.write(tm) ```
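The `__main__` block above stops after writing the enumerated rotamers to an SD file; a natural next step is to rank them by energy. Below is a hedged follow-on sketch, assuming the `mol` object with its added conformers from that block. It uses RDKit's `UFFOptimizeMoleculeConfs`, which returns one `(convergence flag, energy)` pair per conformer; treating the list index as the conformer id is an assumption that holds for conformers added with `assignId=True` starting from an empty id sequence.

```python
# Follow-on sketch: UFF-optimise and energy-rank the enumerated conformers.
from rdkit.Chem import AllChem

results = AllChem.UFFOptimizeMoleculeConfs(mol)  # [(flag, energy), ...], flag == 0 means converged

ranked = sorted(
    ((energy, conf_id) for conf_id, (flag, energy) in enumerate(results) if flag == 0),
    key=lambda pair: pair[0],
)

for energy, conf_id in ranked[:5]:
    print("conformer %d: UFF energy %.2f kcal/mol" % (conf_id, energy))
```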
{ "source": "jensengroup/mol_gen", "score": 2 }
#### File: jensengroup/mol_gen/analyze_ZINC.py ```python from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import Draw from rdkit.Chem.Draw.MolDrawing import MolDrawing from rdkit.Chem.Draw import IPythonConsole import numpy as np import pickle def read_file(file_name): smiles_list = [] with open(file_name,'r') as file: for smiles in file: smiles_list.append(smiles) return smiles_list def get_probs(smarts_list,smiles_list,ring=False): import collections bonds = [] probs = collections.OrderedDict() for smarts in smarts_list: probs[smarts] = 0 number_of_molecules = 0 tot = 0 for smiles in smiles_list: #print smiles number_of_molecules += 1 mol = Chem.MolFromSmiles(smiles) Chem.Kekulize(mol) for smarts in smarts_list: matches = mol.GetSubstructMatches(Chem.MolFromSmarts(smarts),uniquify=ring) num_bonds = len(matches) probs[smarts] += num_bonds tot += num_bonds tot = 0 probs2 = collections.OrderedDict() for key in probs: if probs[key] > 0: #print key, probs[key] tot += probs[key] probs2[key] = probs[key] return tot, probs2 def get_p(probs): p = [] for key in probs: p.append(float(probs[key])/tot) return p def get_rxn_smarts_make_rings(probs): X = {'[#6R': 'X4', '[#7R': 'X3'} rxn_smarts = [] for key in probs: tokens = key.split(']') smarts = '' if '=' in key: smarts += tokens[0][:-1] + X[tokens[0]] + ';!R:1]' else: smarts += tokens[0][:-1] + ';!R:1]=,' smarts += tokens[2][:-1] + ';!R:2]>>' smarts += '[*:1]1' + tokens[1] + '][*:2]1' rxn_smarts.append(smarts) return rxn_smarts def get_rxn_smarts_rings(probs): X = {'[#6R': 'X4', '[#7R': 'X3'} rxn_smarts = [] for key in probs: tokens = key.split(']') smarts = '' if '=' in key: smarts += tokens[0] + X[tokens[0]] + ';!r6;!r7;!R2:1]' else: smarts += tokens[0] + ';!r6;!r7;!R2:1]' smarts += tokens[2] + ';!r6;!r7:2]>>' smarts += '[*:1]' + tokens[1] + '][*:2]' rxn_smarts.append(smarts) return rxn_smarts def get_rxn_smarts(probs): rxn_smarts = [] for key in probs: smarts = '' tokens = key.split(']') smarts = tokens[0] if '-' in key and '#16' not in key: smarts += ';!H0:1]>>[*:1]' if '=' in key and '#16' not in key: smarts += ';!H1;!H0:1]>>[*:1]' if ']#[' in key: smarts += ';H3:1]>>[*:1]' if '#16' in key: smarts += ':1]>>[*:1]' smarts += tokens[-2] + ']' rxn_smarts.append(smarts) return rxn_smarts file_name = '1000.smi' elements = ['#5','#6','#7','#8','#9','#14','#15','#16','#17','#35','#53'] bonds = ['-','=','#'] smiles_list = read_file(file_name) smarts = [] for element in elements: smarts.append('['+element+']') print get_probs(smarts,smiles_list) smarts = [] for element in elements: smarts.append('['+element+'R]') tot_Ratoms,probs_Ratoms = get_probs(smarts,smiles_list) print tot_Ratoms,probs_Ratoms R_elements = [] for key in probs_Ratoms: R_elements.append(key) print R_elements smarts = [] for i,e1 in enumerate(R_elements): for e2 in R_elements: for j,e3 in enumerate(R_elements): if j >= i: sm_s = e1 + '-' + e2 + '-' + e3 if sm_s not in smarts: smarts.append(sm_s) sm_d = e1 + '=' + e2 + '-' + e3 if sm_d not in smarts: smarts.append(sm_d) print len(smarts),smarts tot,probs = get_probs(smarts,smiles_list,ring=True) print tot,probs import operator sorted_x = sorted(probs.items(), key=operator.itemgetter(1), reverse=True) count = 0 for i in range(len(sorted_x)): print sorted_x[i][0],sorted_x[i][1] if '=' in sorted_x[i][0]: count += sorted_x[i][1] print count rxn_smarts_rings = get_rxn_smarts_rings(probs) print rxn_smarts_rings rxn_smarts_make_rings = get_rxn_smarts_make_rings(probs) print rxn_smarts_make_rings p_rings = get_p(probs) 
print p_rings pickle.dump(p_rings,open('p_ring.p','wb')) pickle.dump(rxn_smarts_rings,open('rs_ring.p','wb')) pickle.dump(rxn_smarts_make_rings,open('rs_make_ring.p','wb')) smarts = [] for bond in bonds: for element1 in elements: for element2 in elements: smarts.append('['+element1+']'+bond+'['+element2+';!R]') print len(smarts) tot,probs = get_probs(smarts,smiles_list) print tot, probs p = get_p(probs) print p pickle.dump(p,open('p1.p','wb')) rxn_smarts = get_rxn_smarts(probs) print rxn_smarts pickle.dump(rxn_smarts,open('r_s1.p','wb')) ``` #### File: jensengroup/mol_gen/GA_mol.py ```python from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import Draw from rdkit.Chem.Draw.MolDrawing import MolDrawing from rdkit.Chem.Draw import IPythonConsole from rdkit.Chem import Descriptors from rdkit.Chem import rdmolops import numpy as np import random import time import sascorer import crossover as co import mutate as mu def read_file(file_name): mol_list = [] with open(file_name,'r') as file: for smiles in file: mol_list.append(Chem.MolFromSmiles(smiles)) return mol_list def make_initial_population(population_size,file_name): mol_list = read_file(file_name) population = [] for i in range(population_size): population.append(random.choice(mol_list)) return population def logP_score(m): try: logp = Descriptors.MolLogP(m) except: print m, Chem.MolToSmiles(m) SA_score = -sascorer.calculateScore(m) #cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(m))) cycle_list = m.GetRingInfo().AtomRings() #remove networkx dependence if len(cycle_list) == 0: cycle_length = 0 else: cycle_length = max([ len(j) for j in cycle_list ]) if cycle_length <= 6: cycle_length = 0 else: cycle_length = cycle_length - 6 cycle_score = -cycle_length #print cycle_score #print SA_score #print logp SA_score_norm=(SA_score-SA_mean)/SA_std logp_norm=(logp-logP_mean)/logP_std cycle_score_norm=(cycle_score-cycle_mean)/cycle_std score_one = SA_score_norm + logp_norm + cycle_score_norm global max_score global count count += 1 if score_one > max_score[0]: max_score = [score_one, Chem.MolToSmiles(m)] return score_one def calculate_normalized_fitness(population): fitness = [] for gene in population: score = logP_score(gene) fitness.append(max(float(score),0.0)) #calculate probability sum_fitness = sum(fitness) normalized_fitness = [score/sum_fitness for score in fitness] return normalized_fitness def make_mating_pool(population,fitness): mating_pool = [] for i in range(population_size): mating_pool.append(np.random.choice(population, p=fitness)) return mating_pool def reproduce(mating_pool,population_size,mutation_rate): new_population = [] for n in range(population_size): parent_A = random.choice(mating_pool) parent_B = random.choice(mating_pool) #print Chem.MolToSmiles(parent_A),Chem.MolToSmiles(parent_B) new_child = co.crossover(parent_A,parent_B) #print new_child if new_child != None: new_child = mu.mutate(new_child,mutation_rate) #print "after mutation",new_child if new_child != None: new_population.append(new_child) return new_population global max_score global count logP_values = np.loadtxt('logP_values.txt') SA_scores = np.loadtxt('SA_scores.txt') cycle_scores = np.loadtxt('cycle_scores.txt') SA_mean = np.mean(SA_scores) SA_std=np.std(SA_scores) logP_mean = np.mean(logP_values) logP_std= np.std(logP_values) cycle_mean = np.mean(cycle_scores) cycle_std=np.std(cycle_scores) population_size = 20 generations = 50 mutation_rate = 0.01 co.average_size = 39.15 co.size_stdev = 3.50 print 'population_size', 
population_size print 'generations', generations print 'mutation_rate', mutation_rate print 'average_size/size_stdev', co.average_size, co.size_stdev print '' file_name = '1000.smi' results = [] size = [] t0 = time.time() for i in range(10): max_score = [-99999.,''] count = 0 population = make_initial_population(population_size,file_name) for generation in range(generations): #if generation%10 == 0: print generation fitness = calculate_normalized_fitness(population) mating_pool = make_mating_pool(population,fitness) population = reproduce(mating_pool,population_size,mutation_rate) print i, max_score[0], max_score[1], Chem.MolFromSmiles(max_score[1]).GetNumAtoms() results.append(max_score[0]) size.append(Chem.MolFromSmiles(max_score[1]).GetNumAtoms()) t1 = time.time() print '' print 'time ',t1-t0 print max(results),np.array(results).mean(),np.array(results).std() print max(size),np.array(size).mean(),np.array(size).std() ```
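One fragile spot in `calculate_normalized_fitness` above is the `max(score, 0.0)` clamp: if every molecule in a generation scores non-positive, `sum_fitness` is zero and the normalisation divides by zero. The sketch below shows one guarded roulette-wheel normalisation; the shift-by-minimum fallback is an assumption added here, not what the original code does.

```python
# Hedged sketch: fitness normalisation with a guard for an all-non-positive generation.
def normalized_fitness(scores):
    clamped = [max(float(s), 0.0) for s in scores]
    total = sum(clamped)
    if total == 0.0:
        # All scores were <= 0: shift so the worst score becomes ~0 and selection
        # pressure is preserved instead of dividing by zero.
        shift = min(float(s) for s in scores)
        clamped = [float(s) - shift + 1e-6 for s in scores]
        total = sum(clamped)
    return [c / total for c in clamped]
```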
{ "source": "jensengroup/prohxms", "score": 3 }
#### File: prohxms/fit_scripts/fit_training.py ```python import sys import string import scipy from scipy.stats import beta import numpy def parse_data(filename): x, y = [], [] f = open(filename, "r") for line in f.readlines(): tokens = string.split(line) x.append(float(tokens[0])) y.append(float(tokens[1])) f.close() return x, y data_file = sys.argv[1] x, y = parse_data(data_file) n_bins = 3 bin_step = 100.0/n_bins bin_edges = numpy.arange(0.0, 100.0 + bin_step, bin_step) print bin_edges bin_data = [[] for _ in range(len(bin_edges) - 1)] for i in range(len(x)): n_hb = y[i] val = x[i] for j in range(len(bin_data)): if (val <= bin_edges[j+1]) and (val > bin_edges[j]): bin_data[j].append(n_hb) for i, data in enumerate(bin_data): tdata = numpy.array(data) params = beta.fit(tdata, fscale=1) print i, params[0], params[1], params[2], params[3] #print tdata ``` #### File: prohxms/fit_scripts/plot_all_data.py ```python import sys import string import math import os.path import cPickle import Bio.PDB from matplotlib import pyplot HB_BOND_CUT_OFF = -0.5 def get_donors(model): donors = [] for chain in model: for residue in chain: if (residue.has_id("H") or residue.has_id("HN")) and residue.has_id("N"): donors.append(residue) return donors def get_bb_acceptors(model): acceptors = [] for chain in model: for residue in chain: if residue.has_id("C") and residue.has_id("O"):acceptors.append(residue) return acceptors def get_sc_acceptors(model): donors = [] for chain in model: for residue in chain: if residue.get_resname() == "ASP": if residue.has_id("CG") and residue.has_id("OD1") \ and ( residue.has_id("CG") and residue.has_id("OD2")): donors.append(residue) elif residue.get_resname() == "GLU": if residue.has_id("CD") and residue.has_id("OE1") \ and ( residue.has_id("CD") and residue.has_id("OE2")): donors.append(residue) elif residue.get_resname() == "ASN": if residue.has_id("CG") and residue.has_id("OD1"): donors.append(residue) elif residue.get_resname() == "GLN": if residue.has_id("CD") and residue.has_id("OE1"): donors.append(residue) elif residue.get_resname() == "SER": if residue.has_id("CB") and residue.has_id("OG"): donors.append(residue) elif residue.get_resname() == "THR": if residue.has_id("CB") and residue.has_id("OG1"): donors.append(residue) elif residue.get_resname() == "TYR": if residue.has_id("CZ") and residue.has_id("OH"): donors.append(residue) return donors def get_model(filename): parser = Bio.PDB.PDBParser(QUIET=True) structure = parser.get_structure("pdb", filename) for model in structure: # Return first model return model def calc_energy(n, h, o, c): r_on = (o - n).norm() r_ch = (c - h).norm() r_oh = (o - h).norm() r_cn = (c - n).norm() e = 0.084 * ( 1.0/r_on + 1.0/r_ch - 1.0/r_oh - 1.0/r_cn) * 332.0 return e def has_bb_hbond(donor, acceptor): if donor.get_id()[1] == acceptor.get_id()[1] or \ donor.get_id()[1] == acceptor.get_id()[1] + 1: return False o = acceptor['O'].get_vector() c = acceptor['C'].get_vector() try: h = donor['H'].get_vector() except: h = donor['HN'].get_vector() n = donor['N'].get_vector() e = calc_energy(n, h, o, c) if e < HB_BOND_CUT_OFF: print donor.get_id()[1], " -> ", acceptor.get_id()[1], "%4.2f" % (e) return True else: return False def has_sc_hbond(donor, acceptor): try: h = donor['H'].get_vector() except: h = donor['HN'].get_vector() n = donor['N'].get_vector() e1 = float("inf") e2 = float("inf") if acceptor.get_resname() == "ASP": if acceptor.has_id("CG") and acceptor.has_id("OD1"): o = acceptor['OD1'].get_vector() c = acceptor['CG'].get_vector() e1 = 
calc_energy(n, h, o, c) if acceptor.has_id("CG") and acceptor.has_id("OD2"): o = acceptor['OD2'].get_vector() c = acceptor['CG'].get_vector() e2 = calc_energy(n, h, o, c) elif acceptor.get_resname() == "GLU": if acceptor.has_id("CD") and acceptor.has_id("OE1"): o = acceptor['OE1'].get_vector() c = acceptor['CD'].get_vector() e1 = calc_energy(n, h, o, c) if acceptor.has_id("CD") and acceptor.has_id("OE2"): o = acceptor['OE2'].get_vector() c = acceptor['CD'].get_vector() e2 = calc_energy(n, h, o, c) elif acceptor.get_resname() == "ASN": if acceptor.has_id("CG") and acceptor.has_id("OD1"): o = acceptor['OD1'].get_vector() c = acceptor['CG'].get_vector() e1 = calc_energy(n, h, o, c) elif acceptor.get_resname() == "GLN": if acceptor.has_id("CD") and acceptor.has_id("OE1"): o = acceptor['OE1'].get_vector() c = acceptor['CD'].get_vector() e1 = calc_energy(n, h, o, c) elif acceptor.get_resname() == "SER": if acceptor.has_id("CB") and acceptor.has_id("OG"): o = acceptor['OG'].get_vector() c = acceptor['CB'].get_vector() e1 = calc_energy(n, h, o, c) elif acceptor.get_resname() == "THR": if acceptor.has_id("CB") and acceptor.has_id("OG1"): o = acceptor['OG1'].get_vector() c = acceptor['CB'].get_vector() e1 = calc_energy(n, h, o, c) elif acceptor.get_resname() == "TYR": if acceptor.has_id("CZ") and acceptor.has_id("OH"): o = acceptor['OH'].get_vector() c = acceptor['CZ'].get_vector() e1 = calc_energy(n, h, o, c) if (e1 < HB_BOND_CUT_OFF) or (e2 < HB_BOND_CUT_OFF): print donor.get_id()[1], " -> ", acceptor.get_id()[1], min(e1, e2) return True else: return False def calc_hbonds(model): donors = get_donors(model) bb_acceptors = get_bb_acceptors(model) sc_acceptors = get_sc_acceptors(model) hbonds = [] for donor in donors: for acceptor in bb_acceptors: if has_bb_hbond(donor, acceptor): hbonds.append(donor.get_id()[1]) for acceptor in sc_acceptors: if has_sc_hbond(donor, acceptor): hbonds.append(donor.get_id()[1]) return hbonds def parse_data(filename): data = [] f = open(filename, "r") lines = f.readlines() for line in lines: if line[0] == "#" or len(line) < 4: continue tokens = string.split(line) start = int(tokens[0]) end = int(tokens[1]) uptake = float(tokens[2])/100.0 residues = range(start, end + 1) length = len(residues) data_point = dict() data_point['residues'] = residues data_point['uptake'] = uptake data_point['length'] = length data.append(data_point) f.close() return data def load_pickle(filename): f = open(filename,"rb") p = cPickle.load(f) f.close() return(p) if __name__ == "__main__": data_filename = sys.argv[1] data = parse_data(data_filename) x = [] y = [] pickle_filenames = ["clean_structures/1axi.pqr.cpickle", "clean_structures/3hhr.pqr.cpickle", "clean_structures/1hgu.pqr.cpickle", "clean_structures/hgh_model.cpickle"] for pickle_filename in pickle_filenames: hbonds = load_pickle(pickle_filename) for d in data: #print d n_bonds = 0.0 for residue in d['residues']: if residue in hbonds: n_bonds += 1 # print d, d['uptake'], n_bonds/d['length'], d['length'] if n_bonds/d['length'] < 0.1: print d, d['uptake'], n_bonds/d['length'], pickle_filename x.append(d['uptake'] * 100.0) y.append(n_bonds/d['length']) pyplot.plot(x, y, 'ko') pyplot.xlabel("Deuterium uptake [%]") pyplot.ylabel("Avg. 
hbonds") pyplot.ylim([0.0, 1.0]) pyplot.xlim([0.0, 100.0]) pyplot.grid(True) pyplot.title("all structures" + data_filename) png_filename = "all_structures_" + data_filename.rstrip(".txt") + ".png" print png_filename pyplot.savefig(png_filename) ``` #### File: prohxms/fit_scripts/plot_binom.py ```python import numpy from scipy.special import binom, gamma from matplotlib import pyplot import sys def factorial(x): try: return gamma(x + 1) except OverflowError: print "Overflow, x =",x exit(0) def B(x, y): return factorial(x - 1) * factorial(y - 1) / factorial(x + y - 1) n = int(sys.argv[1]) mu = float(sys.argv[2]) sigma = float(sys.argv[3]) alpha = - mu * (mu * mu - mu * n + sigma * sigma) / (sigma * sigma * n + mu * mu - mu * n) beta = (n * alpha) / mu - alpha alpha = float(sys.argv[2]) beta = float(sys.argv[3]) if (alpha < 0.0) or (beta < 0.0): print "ERROR: Negative parameter value:" print "alpha =", alpha, "beta =", beta exit(0) sigma = numpy.sqrt( n * alpha * beta * (alpha + beta + n) / ((alpha + beta) * (alpha + beta) * (1 + alpha + beta))) mu = n * alpha / (alpha + beta) print "alpha =", alpha, "beta =", beta print "mu = %f sigma = %f" % (mu, sigma) def beta_binom(k): return binom(n, k) * B(k + alpha, n - k + beta) / B(alpha, beta) for k in range(0, n + 1): print "P(N =%3i) = %6.4f" % (k, beta_binom(k)) pyplot.rc('text', usetex=True) pyplot.rc('font', family='serif') vals = numpy.arange(0, n + 1) probs = numpy.array([beta_binom(val) for val in vals]) bar_width = 0.55 pyplot.bar(vals + bar_width/2, probs, bar_width, color = 'DarkSlateBlue', alpha=0.6) pyplot.title(r"$n = %i,\ \mu= %5.2f,\ \sigma = %5.2f\ (\alpha = %5.2f,\ \beta = %5.2f)$" % (n, mu, sigma, alpha, beta), fontsize=20) val_texts = [r"$%i$" % (val) for val in vals] pyplot.xlabel(r"$k$", fontsize=16) pyplot.xticks(vals + bar_width, val_texts, fontsize=16) pyplot.xlim([0.0, numpy.amax(vals) + bar_width*2]) pyplot.yticks(fontsize=16) pyplot.ylabel(r"$P(N_\mathrm{HB}=k)$", fontsize=16) pyplot.grid(True) pyplot.savefig("bar.png") ``` #### File: jensengroup/prohxms/get_log_likelihood.py ```python import sys import os from numpy import log import prohxms_api as prohxms from parameters import model1 as parameters # Ideal gas constant in kcal/mol R_GAS = 0.001987204118 # 300 kelvin in units of kelvin. T300K = 300.0 # R * T at 300K (i.e. kB * T in units of kcal/mol) RT = R_GAS * T300K def get_log_lik(k, n, uptake): alpha = None beta = None for parameter in parameters: if uptake < parameter[0]: (max_uptake, alpha, beta) = parameter if alpha is None or beta is None: print "An error occurred determining alpha and beta" exit(1) p = prohxms.beta_binom(k, n, alpha, beta) log_lik = -1.0 * log(p) return log_lik if __name__ == "__main__": pdb_filename = sys.argv[1] data_filename = sys.argv[2] model = prohxms.get_model(pdb_filename) hbonds = prohxms.calc_hbonds(model) data = prohxms.parse_data(data_filename) log_lik_sum = 0.0 for d in data: #print d n_bonds = 0.0 for residue in d['residues']: if residue in hbonds: n_bonds += 1 # print d, d['uptake'], n_bonds/d['length'], d['length'] k = n_bonds n = d['length'] uptake = d['uptake'] log_lik = get_log_lik(k, n, uptake) log_lik_sum += log_lik #print d, d['uptake'], n_bonds/d['length'], log_lik print " E = %4.3f kcal/mol" % (log_lik_sum * RT) ```
{ "source": "jensengrouppsu/rapid", "score": 2 }
#### File: rapid/gui/exchange.py ```python from __future__ import print_function, division, absolute_import # Std. lib imports from math import pi from textwrap import dedent # Non-std. lib imports from PySide.QtCore import Signal, QObject, Qt, QRegExp from PySide.QtGui import QGroupBox, QVBoxLayout, QHBoxLayout, QLabel, \ QLineEdit, QComboBox, QStringListModel, QCheckBox, \ QGridLayout, QDoubleValidator, QRadioButton from numpy.testing import assert_approx_equal from numpy import zeros, vstack, ndenumerate, ndindex, ndarray # Local imports from rapid.common import ZMat from rapid.gui.guicommon import error from rapid.gui.guicommon import toolTipText as ttt HZ2WAVENUM = 1 / ( 100 * 2.99792458E8 * 2 * pi ) indexrole = Qt.UserRole raterole = Qt.UserRole+1 class ExchangeView(QGroupBox): '''The box containing the rate value''' def __init__(self, title = 'Peak Exchange Matrix', parent = None): '''Initialize''' super(ExchangeView, self).__init__(parent) self.setTitle(title) self._createWidgets() def _createWidgets(self): '''Create the widgets contained in this box''' # Peak number chooser self.numpeaks = [QRadioButton("2"), QRadioButton("3"), QRadioButton("4")] self.numpeaks[0].setToolTip(ttt('Model the exchange of 2 peaks')) self.numpeaks[1].setToolTip(ttt('Model the exchange of 3 peaks')) self.numpeaks[2].setToolTip(ttt('Model the exchange of 4 peaks')) # Make 4x4 matrix of QLabels self.exview = [[QLabel(self) for i in xrange(4)] for j in xrange(4)] for i in xrange(4): for e in self.exview[i]: e.setToolTip(ttt('The current exchange matrix')) # Enforce symmetry button self.symmetry = QCheckBox("Enforce Symmetry", self) self.symmetry.setToolTip(ttt('If symmetry is on then you only need to ' 'manually set the upper triangle of the ' 'exchange matrix. Thse values are ' 'mirrored ' 'in the lower triangle and the diagonals ' 'are automatically set so that each row ' 'sums to 1. 
' 'Otherwise you must set every element')) # Exchange picker self.exchooser = QComboBox(self) self.exchooser.setToolTip(ttt('Choose between which two peaks to set ' 'the exchange (relative) rate')) # Exchange value self.exvalue = QLineEdit(self) self.exvalue.setToolTip(ttt('The exchange (relative) rate')) self.exvalue.setValidator(QDoubleValidator(0.0, 1.0, 3, self.exvalue)) def makeConnections(self): '''Connect the widgets together''' # When the table has been resized, tidy it up self.matrix.matrixChanged.connect(self.resetMatrix) # If the check state changed, change the data model self.symmetry.stateChanged.connect(self.changeDataModel) self.numpeaks[0].clicked.connect(self.changeDataModel) self.numpeaks[1].clicked.connect(self.changeDataModel) self.numpeaks[2].clicked.connect(self.changeDataModel) # Attach the chooser to an exchange rate self.exchooser.currentIndexChanged.connect(self.attachExchange) # If the exchange rate is changed, update the matrix self.exvalue.editingFinished.connect(self.newExchange) def initUI(self): '''Lays out the widgets''' nums = QHBoxLayout() nums.addWidget(QLabel("Number of Peaks: ")) nums.addWidget(self.numpeaks[0]) nums.addWidget(self.numpeaks[1]) nums.addWidget(self.numpeaks[2]) val = QHBoxLayout() val.addWidget(QLabel("Exchange: ")) val.addStretch() val.addWidget(self.exchooser) self.exvalue.setMaximumWidth(50) val.addWidget(self.exvalue) ex = QGridLayout() for i in xrange(4): for j in xrange(4): ex.addWidget(self.exview[i][j], i+1, j+1) lo = QVBoxLayout() lo.addLayout(nums) lo.addWidget(self.symmetry) lo.addLayout(val) lo.addLayout(ex) self.setLayout(lo) def setModel(self, model, npmodel): '''Attaches models to the views.''' self.matrix = model self.npmodel = npmodel def setNumPeaks(self, npeaks): '''Manually set the number of peaks''' if npeaks == 2: self.numpeaks[0].click() elif npeaks == 3: self.numpeaks[1].click() elif npeaks == 4: self.numpeaks[2].click() else: error.showMessage('Only valid number of peaks is 2, 3, or 4') def setMatrixSymmetry(self, sym): '''Manually set the matrix symmetry''' self.symmetry.setChecked(sym) def setMatrix(self, Z): '''Manually set the matrix elements with a numpy matrix''' npeaks = self.npmodel.getNumPeaks() self.matrix.matrix = Z[0:npeaks,0:npeaks] self.matrix.updateInternalModel(npeaks) self.resetMatrix() ####### # SLOTS ####### def newExchange(self): '''Prepares an exchange value to be broadcasted''' try: value = round(float(self.exvalue.text()), 3) except ValueError: value = 0.0 indx = self.exchooser.currentIndex() if self.numpeaks[0].isChecked(): npeaks = 2 elif self.numpeaks[1].isChecked(): npeaks = 3 elif self.numpeaks[2].isChecked(): npeaks = 4 self.matrix.updateExchange(value, indx, npeaks) def resetMatrix(self): '''Reset the matrix values''' # Iterate over the matrix and fill the values for index, num in ndenumerate(self.matrix.matrix): self.exview[index[0]][index[1]].setText('{0:.3f}'.format(num)) # Set all other values to two dashes if len(self.matrix.matrix) == 2: for i in xrange(4): for j in xrange(4): if not (i < 2 and j < 2): self.exview[i][j].setText('--') elif len(self.matrix.matrix) == 3: for i in xrange(4): for j in xrange(4): if not (i < 3 and j < 3): self.exview[i][j].setText('--') def changeDataModel(self): '''Change the matrix from symmetric to not or vice versa''' # Change the model for the combo box if self.numpeaks[0].isChecked(): npeaks = 2 elif self.numpeaks[1].isChecked(): npeaks = 3 elif self.numpeaks[2].isChecked(): npeaks = 4 self.npmodel.setNumPeaks(npeaks) self.matrix.sym = 
self.symmetry.isChecked() if self.matrix.sym: self.exchooser.setModel(self.matrix.symex[npeaks]) else: self.exchooser.setModel(self.matrix.unsymex[npeaks]) # Reset the matrix self.matrix.setMatrix(npeaks) def attachExchange(self, indx): '''Attach a new exchange rate to the chooser''' r = self.matrix.symrate if self.matrix.sym else self.matrix.unsymrate self.exvalue.setText('{0:.3f}'.format(r[self.npmodel.numPeaks][indx])) ######### # SIGNALS ######### #/\/\/\/\/\/\/\/ # NumPeaks Model #/\/\/\/\/\/\/\/ class NumPeaks(QObject): '''Class to hold all information about the function''' def __init__(self, parent = None): '''Initiallize the function class''' super(NumPeaks, self).__init__(parent) self.numPeaks = 2 def getNumPeaks(self): '''Return the number of peak''' return self.numPeaks ####### # SLOTS ####### def setNumPeaks(self, num): '''Sets the number of peaks''' self.numPeaks = num self.numberOfPeaksChanged.emit(self.numPeaks) ######### # SIGNALS ######### # The number of peaks numberOfPeaksChanged = Signal(int) #/\/\/\/\/\/\/\/ # Exchange Model #/\/\/\/\/\/\/\/ class ExchangeModel(QObject): '''Class to hold all information about the function''' def __init__(self, parent = None): '''Initiallize the function class''' super(ExchangeModel, self).__init__(parent) self.matrix = zeros((2,2)) self.sym = True # Make the exchange models # First make the models with the label seen by the user sym = ['1 -> 2', '1 -> 2_1 -> 3_2 -> 3', '1 -> 2_1 -> 3_1 -> 4_2 -> 3_2 -> 4_3 -> 4'] nosym = ['1_1 -> 2_2 -> 1_2', '1_1 -> 2_1 -> 3_2 -> 1_2_2 -> 3_3 -> 1_3 -> 2_3', '_'.join(['1', '1 -> 2', '1 -> 3', '1 -> 4', '2 -> 1', '2', '2 -> 3', '2 -> 4', '3 -> 1', '3 -> 2', '3', '3 -> 4', '4 -> 1', '4 -> 2', '4 -> 3', '4'])] # A function to convert a string to a QStringListModel f = lambda x: QStringListModel(x.split('_')) # The two empty elements are so the model can correspond to the number # of peaks self.symex = ['', ''] + [f(x) for x in sym] self.unsymex = ['', ''] + [f(x) for x in nosym] # Set the indices and rates for each label self.symindx = ((), (), ((0,1),), ((0,1), (0,2), (1,2)), ((0,1), (0,2), (0,3), (1,2), (1,3), (2,3))) self.unsymindx = ((), (), ((0,0), (0,1), (1,0), (1,1)), ((0,0), (0,1), (0,2), (1,0), (1,1), (1,2), (2,0), (2,1), (2,2)), ((0,0), (0,1), (0,2), (0,3), (1,0), (1,1), (1,2), (1,3), (2,0), (2,1), (2,2), (2,3), (3,0), (3,1), (3,2), (3,3))) self.symrate = [[], [], [0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] self.unsymrate = [[], [], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] self._makeConnections() def _makeConnections(self): '''Connects the contained objects together''' pass def setMatrix(self, npeaks): '''Set the values into the matrix''' # Make an alias for the proper model based on symmetry or not indx = self.symindx[npeaks] if self.sym else self.unsymindx[npeaks] rate = self.symrate[npeaks] if self.sym else self.unsymrate[npeaks] # Send values to the ZMat constructor then update self.matrix = ZMat(npeaks, indx, rate, self.sym) self.updateInternalModel(npeaks) def updateInternalModel(self, npeaks): '''Updates the internal representation from the matrix''' # Use the new values to update the internal matrix model if npeaks == 2: self.unsymrate[2][0] = self.matrix[0,0] self.unsymrate[2][1] = self.matrix[0,1] self.unsymrate[2][2] = self.matrix[1,0] self.unsymrate[2][3] = self.matrix[1,1] self.unsymrate[3][0] = self.matrix[0,0] self.unsymrate[3][1] = self.matrix[0,1] 
self.unsymrate[3][3] = self.matrix[1,0] self.unsymrate[3][4] = self.matrix[1,1] self.unsymrate[4][0] = self.matrix[0,0] self.unsymrate[4][1] = self.matrix[0,1] self.unsymrate[4][4] = self.matrix[1,0] self.unsymrate[4][5] = self.matrix[1,1] self.symrate[2][0] = self.matrix[0,1] self.symrate[3][0] = self.matrix[0,1] self.symrate[4][0] = self.matrix[0,1] elif npeaks == 3: self.unsymrate[2][0] = self.matrix[0,0] self.unsymrate[2][1] = self.matrix[0,1] self.unsymrate[2][2] = self.matrix[1,0] self.unsymrate[2][3] = self.matrix[1,1] self.unsymrate[3][0] = self.matrix[0,0] self.unsymrate[3][1] = self.matrix[0,1] self.unsymrate[3][2] = self.matrix[0,2] self.unsymrate[3][3] = self.matrix[1,0] self.unsymrate[3][4] = self.matrix[1,1] self.unsymrate[3][5] = self.matrix[1,2] self.unsymrate[3][6] = self.matrix[2,0] self.unsymrate[3][7] = self.matrix[2,1] self.unsymrate[3][8] = self.matrix[2,2] self.unsymrate[4][0] = self.matrix[0,0] self.unsymrate[4][1] = self.matrix[0,1] self.unsymrate[4][3] = self.matrix[0,2] self.unsymrate[4][5] = self.matrix[1,0] self.unsymrate[4][6] = self.matrix[1,1] self.unsymrate[4][7] = self.matrix[1,2] self.unsymrate[4][9] = self.matrix[2,0] self.unsymrate[4][10] = self.matrix[2,1] self.unsymrate[4][11] = self.matrix[2,2] self.symrate[2][0] = self.matrix[0,1] self.symrate[3][0] = self.matrix[0,1] self.symrate[3][1] = self.matrix[0,2] self.symrate[3][2] = self.matrix[1,2] self.symrate[4][0] = self.matrix[0,1] self.symrate[4][1] = self.matrix[0,2] self.symrate[4][3] = self.matrix[1,2] elif npeaks == 4: self.unsymrate[2][0] = self.matrix[0,0] self.unsymrate[2][1] = self.matrix[0,1] self.unsymrate[2][2] = self.matrix[1,0] self.unsymrate[2][3] = self.matrix[1,1] self.unsymrate[3][0] = self.matrix[0,0] self.unsymrate[3][1] = self.matrix[0,1] self.unsymrate[3][2] = self.matrix[0,2] self.unsymrate[3][3] = self.matrix[1,0] self.unsymrate[3][4] = self.matrix[1,1] self.unsymrate[3][5] = self.matrix[1,2] self.unsymrate[3][6] = self.matrix[2,0] self.unsymrate[3][7] = self.matrix[2,1] self.unsymrate[3][8] = self.matrix[2,2] self.unsymrate[4][0] = self.matrix[0,0] self.unsymrate[4][1] = self.matrix[0,1] self.unsymrate[4][2] = self.matrix[0,2] self.unsymrate[4][3] = self.matrix[0,3] self.unsymrate[4][4] = self.matrix[1,0] self.unsymrate[4][5] = self.matrix[1,1] self.unsymrate[4][6] = self.matrix[1,2] self.unsymrate[4][7] = self.matrix[1,3] self.unsymrate[4][8] = self.matrix[2,0] self.unsymrate[4][9] = self.matrix[2,1] self.unsymrate[4][10] = self.matrix[2,2] self.unsymrate[4][11] = self.matrix[2,3] self.unsymrate[4][12] = self.matrix[3,0] self.unsymrate[4][13] = self.matrix[3,1] self.unsymrate[4][14] = self.matrix[3,2] self.unsymrate[4][15] = self.matrix[3,3] self.symrate[2][0] = self.matrix[0,1] self.symrate[3][0] = self.matrix[0,1] self.symrate[3][1] = self.matrix[0,2] self.symrate[3][2] = self.matrix[1,2] self.symrate[4][0] = self.matrix[0,1] self.symrate[4][1] = self.matrix[0,2] self.symrate[4][2] = self.matrix[0,3] self.symrate[4][3] = self.matrix[1,2] self.symrate[4][4] = self.matrix[1,3] self.symrate[4][5] = self.matrix[2,3] self.matrixChanged.emit() def getParams(self, npeaks): '''Return the exchange parameters''' if self.sym: return self.symrate[npeaks], self.symindx[npeaks], True else: return self.unsymrate[npeaks], self.unsymindx[npeaks], False def getMatrix(self): '''Return the actual exchange matrix''' return self.matrix ####### # SLOTS ####### def resizeMatrix(self, npeaks): '''Resize the matrix to the number of peaks''' self.matrix = zeros((npeaks,npeaks)) self.setMatrix(npeaks) 
def updateExchange(self, value, indx, npeaks): '''Updates the exchange with a new value''' model = self.symrate if self.sym else self.unsymrate model[npeaks][indx] = value self.setMatrix(npeaks) ######### # SIGNALS ######### # Alert when the matrix is new matrixChanged = Signal() ``` #### File: rapid/gui/peak.py ```python from __future__ import print_function, division, absolute_import # Non-std. lib imports from PySide.QtCore import Signal, QObject from PySide.QtGui import QTabWidget, QVBoxLayout, QWidget, QLineEdit, \ QDoubleValidator, QLabel, QGridLayout from numpy import asarray, nan # Local imports from rapid.gui.guicommon import toolTipText as ttt class PeakView(QTabWidget): '''Class to display the peak information''' def __init__(self, parent = None): '''Initialize''' super(PeakView, self).__init__(parent) self._createWidgets() def _createWidgets(self): '''Create the widgets contained in this box''' self.pages = [ PeakPage("Peak 1", 0), PeakPage("Peak 2", 1), PeakPage("Peak 3", 2), PeakPage("Peak 4", 3), ] def initUI(self): '''Initilizes the layout of the contained widgets''' self.addTab(self.pages[0], self.pages[0].title) self.addTab(self.pages[1], self.pages[1].title) self.npeaks = 2 def makeConnections(self): '''Connect all the contained widgets together''' self.model.newParams.connect(self.distributeNewParams) self.model.changeNumPeaks.connect(self.changePeakNum) self.pages[0].changeInputParams.connect(self.model.setInputParams) self.pages[1].changeInputParams.connect(self.model.setInputParams) self.pages[2].changeInputParams.connect(self.model.setInputParams) self.pages[3].changeInputParams.connect(self.model.setInputParams) def setModel(self, model): '''Attaches a model to this view''' self.model = model def setPeaks(self, vib, GL, GG, h): '''Manually set the peak data''' for i, vals in enumerate(zip(vib, GL, GG, h)): self.pages[i].setParams(*vals) ####### # SLOTS ####### def distributeNewParams(self, p, GL, GG, h): '''When the new parameters are given, send the results to the appropriate page''' for i, vals in enumerate(zip(p, GL, GG, h)): self.pages[i].viewNewParams(*vals) def changePeakNum(self, npeaks): '''Change the number of peaks by adding or removing tabs''' if self.npeaks == 2: if npeaks == 3: self.addTab(self.pages[2], self.pages[2].title) self.npeaks = 3 elif npeaks == 4: self.addTab(self.pages[2], self.pages[2].title) self.addTab(self.pages[3], self.pages[3].title) self.npeaks = 4 elif self.npeaks == 3: if npeaks == 2: self.removeTab(2) self.npeaks = 2 elif npeaks == 4: self.addTab(self.pages[3], self.pages[3].title) self.npeaks = 4 elif self.npeaks == 4: if npeaks == 2: self.removeTab(3) self.removeTab(2) self.npeaks = 2 elif npeaks == 3: self.removeTab(3) self.npeaks = 3 class PeakPage(QWidget): '''A peak page widget''' def __init__(self, title, ID): '''Initialize''' super(PeakPage, self).__init__() self.title = title self.ID = ID self._createWidgets() self._initUI() self._makeConnections() def _createWidgets(self): '''Create the contained widgets''' self.inputpeak = QLineEdit(self) self.inputGL = QLineEdit(self) self.inputGG = QLineEdit(self) self.inputH = QLineEdit(self) self.newpeak = QLineEdit(self) self.newGL = QLineEdit(self) self.newGG = QLineEdit(self) self.newH = QLineEdit(self) self.inputpeak.setValidator(QDoubleValidator(300.0, 3000.0, 1, self.inputpeak)) self.inputGL.setValidator(QDoubleValidator(0.0, 100.0, 3, self.inputGL)) self.inputGG.setValidator(QDoubleValidator(0.0, 100.0, 3, self.inputGG)) self.inputH.setValidator(QDoubleValidator(0.0, 1.0, 3, 
self.inputH)) self.newpeak.setReadOnly(True) self.newGL.setReadOnly(True) self.newGG.setReadOnly(True) self.newH.setReadOnly(True) self.inputpeak.setToolTip(ttt('The vibrational frequency of this peak' ', in wavenumbers')) self.inputGL.setToolTip(ttt('The Lorentzian FWHM broadening of this ' 'peak, in wavenumbers')) self.inputGG.setToolTip(ttt('The Gaussian FWHM broadening of this peak' ', in wavenumbers')) self.inputH.setToolTip(ttt('The relative height of this peak')) self.newpeak.setToolTip(ttt('The vibrational frequency after ' 'exchange')) self.newGL.setToolTip(ttt('The Gaussian FWHM after exchange')) self.newGG.setToolTip(ttt('The Lorentzian FWHM after exchange')) self.newH.setToolTip(ttt('The relative height after exchange')) def _initUI(self): '''Layout the contained widgets''' lo = QGridLayout() lo.addWidget(QLabel("Input Value"), 1, 2) lo.addWidget(QLabel("New Value"), 1, 4) lo.addWidget(QLabel("Peak"), 2, 1) lo.addWidget(self.inputpeak, 2, 2) lo.addWidget(QLabel("-->"), 2, 3) lo.addWidget(self.newpeak, 2, 4) lo.addWidget(QLabel("Lorentz FWHM"), 3, 1) lo.addWidget(self.inputGL, 3, 2) lo.addWidget(QLabel("-->"), 3, 3) lo.addWidget(self.newGL, 3, 4) lo.addWidget(QLabel("Gauss FWHM"), 4, 1) lo.addWidget(self.inputGG, 4, 2) lo.addWidget(QLabel("-->"), 4, 3) lo.addWidget(self.newGG, 4, 4) lo.addWidget(QLabel("Height"), 5, 1) lo.addWidget(self.inputH, 5, 2) lo.addWidget(QLabel("-->"), 5, 3) lo.addWidget(self.newH, 5, 4) self.setLayout(lo) def _makeConnections(self): '''Connect the contained widgets together''' self.inputpeak.editingFinished.connect(self.inputParamsChanged) self.inputGL.editingFinished.connect(self.inputParamsChanged) self.inputGG.editingFinished.connect(self.inputParamsChanged) self.inputH.editingFinished.connect(self.inputParamsChanged) def viewNewParams(self, p, GL, GG, h): '''View the new parameters after exchange''' self.newpeak.setText('{0:.1f}'.format(p)) self.newGL.setText('{0:.3f}'.format(GL)) self.newGG.setText('{0:.3f}'.format(GG)) self.newH.setText('{0:.3f}'.format(h)) def setParams(self, p, GL, GG, h): '''Manually set the parameters on this page''' self.inputpeak.setText('{0:.1f}'.format(p)) self.inputGL.setText('{0:.3f}'.format(GL)) self.inputGG.setText('{0:.3f}'.format(GG)) self.inputH.setText('{0:.3f}'.format(h)) # Force an updata of the data self.inputpeak.editingFinished.emit() ####### # SLOTS ####### def inputParamsChanged(self): '''Collects the parameters from this page and broadcasts them''' try: vib = float(self.inputpeak.text()) except ValueError: return try: GL = float(self.inputGL.text()) except ValueError: return try: GG = float(self.inputGG.text()) except ValueError: return try: h = float(self.inputH.text()) except ValueError: return self.changeInputParams.emit(self.ID, vib, GL, GG, h) ######### # SIGNALS ######### # Signals for when a value is changed changeInputParams = Signal(int, float, float, float, float) class PeakModel(QObject): '''Class to hold all information about the peaks''' def __init__(self, parent = None): '''Initialize the function class''' super(PeakModel, self).__init__(parent) self.npeaks = 0 #self.peaks = [1960.0, 1980.0, 2000.0, 2020.0] self.peaks = [nan, nan, nan, nan] self.GL = [nan, nan, nan, nan] #self.GL = [5.0, 5.0, 5.0, 5.0] self.GG = [nan, nan, nan, nan] #self.GG = [5.0, 5.0, 5.0, 5.0] self.h = [nan, nan, nan, nan] #self.h = [1.0, 1.0, 1.0, 1.0] self.newpeaks = [0.0, 0.0, 0.0, 0.0] self.newGL = [0.0, 0.0, 0.0, 0.0] self.newGG = [0.0, 0.0, 0.0, 0.0] self.newh = [0.0, 0.0, 0.0, 0.0] def getParams(self): '''Return 
the input parameters for the given number of peaks''' return (asarray(self.peaks[:self.npeaks]), asarray(self.GL[:self.npeaks]), asarray(self.GG[:self.npeaks]), asarray(self.h[:self.npeaks])) ####### # SLOTS ####### def setNewParams(self, p, GL, GG, h): '''Set the parameters after exchange''' # First, replace the values in the lists for i, vals in enumerate(zip(p, GL, GG, h)): self.newpeaks[i] = vals[0] self.newGL[i] = vals[1] self.newGG[i] = vals[2] self.newh[i] = vals[3] # Now broadcast results self.newParams.emit(p, GL, GG, h) def changePeakNum(self, npeaks): '''Change the number of peaks''' self.npeaks = npeaks self.changeNumPeaks.emit(self.npeaks) def setInputParams(self, num, p, GL, GG, h): '''Set the input parameter for a given peak, then emit''' self.peaks[num] = p self.GL[num] = GL self.GG[num] = GG self.h[num] = h self.inputParamsChanged.emit() ######### # SIGNALS ######### # Release the parameters to calculate the spectrum inputParamsChanged = Signal() # View the new parameters after exchange newParams = Signal(list, list, list, list) # Change the number of peaks changeNumPeaks = Signal(int) ``` #### File: rapid/gui/plot.py ```python from __future__ import print_function, division, absolute_import # Non-std. lib imports from PySide.QtGui import QFrame, QPalette, QColor, QPen, QApplication from PySide.QtCore import Qt, Signal from numpy import array from random import random # Local imports from rapid.pyqtgraph import PlotWidget, ViewBox from rapid.common import normalize, clip from rapid.gui.guicommon import error class Plot(PlotWidget): '''A plot''' def __init__(self, parent, title = ''): '''Initialize the plot and it's parent class''' border = {'color': 0.5, 'width': 2} super(Plot, self).__init__(parent, viewBox=ViewBox(border=border)) self._setupPlot(title) def _setupPlot(self, title): '''Label plot axis and put a default function on the plot''' # Add a title if given if title: self.setTitle(title) self.rawData = None # Default to not-reversed self.reversed = False # Set the axes for the intial data self.getPlotItem().setLabel('bottom', "Frequency (Wavenumbers, cm<sup>-1</sup>)") self.getPlotItem().getAxis('bottom').setPen('k') self.getPlotItem().setLabel('left', "Intensity (Normalized)") self.getPlotItem().getAxis('left').setPen('k') self.getPlotItem().setYRange(0, 1.1, padding=0) # # Make the background white and the line thick enough to see self.setBackground('w') # White # Create the XY data points. Empty for now. 
self.data = self.getPlotItem().plot([], [], antialias=True, connect='all', pen={'color': 'b', 'width': 2}) # The raw (experimental) data, if any self.raw = self.getPlotItem().plot([], [], antialias=True, connect='all', pen={'color': 'g', 'width': 2}) # # Make sure the plot is wide enough self.setMinimumWidth(850) def makeConnections(self): '''Connect the plot together''' self.scene().sigMouseMoved.connect(self.catchSelection) def calculatedData(self): '''Return the calculated data''' x, y = self.data.getData() return array(x), array(y) def getRawData(self): '''Return the raw data in same format it was read in''' x, y = self.raw.getData() return array([x, y]).T def setRawData(self, raw): '''Stores the raw data internally''' self.rawData = raw def plotRawData(self): '''Plot the raw data''' if self.rawData is None: error.showMessage("Cannot plot raw data, none has been given") return xlim = self.data.dataBounds(0) if xlim[0] > xlim[1]: xlim[0], xlim[1] = xlim[1], xlim[0] # Clip the data to only the plotting window (to remove baseline) raw = clip(self.rawData.copy(), xlim) raw[:,1] = normalize(raw[:,1]) self.raw.setData(raw[:,0], raw[:,1]) if not self.raw.isVisible(): self.raw.show() self.replot() ####### # SLOTS ####### def plotCalculatedData(self, x, y): '''Plot the calculated data''' try: y = normalize(y) except ValueError: # Occurs on startup return self.data.setData(x, y) self.replot() def clearRawData(self): '''Clear the raw data''' self.rawData = None self.raw.clear() def changeScale(self, min, max, reversed): '''Change the axis scale''' self.reversed = reversed self.getPlotItem().invertX(reversed) self.getPlotItem().setXRange(min, max) x, y = self.calculatedData() self.plotCalculatedData(x, y) if self.rawData is not None: self.plotRawData() def catchSelection(self, point): '''Catch a point and re-emit''' p = self.getPlotItem().getViewBox().mapSceneToView(point) self.pointPicked.emit(p.x(), p.y()) ######### # SIGNALS ######### pointPicked = Signal(float, float) ```
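The `ExchangeModel` above hands its `symindx`/`symrate` bookkeeping to a `ZMat` constructor that lives elsewhere in rapid and is not shown here. As a rough standalone illustration of how those `(i, j)` pairs and rates map onto a symmetric matrix (an assumption about what `ZMat` does, not its actual implementation), consider:

```python
import numpy as np

def build_symmetric_exchange(npeaks, indx, rate):
    """Fill a symmetric relative-rate matrix from (i, j) pairs and rates.

    Mirrors how ExchangeModel pairs symindx with symrate; the real ZMat
    constructor in rapid may normalize or set diagonal elements differently.
    """
    z = np.zeros((npeaks, npeaks))
    for (i, j), k in zip(indx, rate):
        z[i, j] = k
        z[j, i] = k  # symmetric exchange: forward and reverse rates are equal
    return z

# Three peaks, pairs (0,1), (0,2), (1,2) as stored in ExchangeModel.symindx[3]
print(build_symmetric_exchange(3, ((0, 1), (0, 2), (1, 2)), [0.10, 0.00, 0.05]))
```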
{ "source": "jensengroup/RegioSQM20", "score": 2 }
#### File: RegioSQM20/regiosqm/molecule_formats.py ```python import subprocess from rdkit.Chem import rdmolfiles, AllChem # TorsionFingerprints from rdkit.ML.Cluster import Butina def shell(cmd, shell=False): if shell: p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: cmd = cmd.split() p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, err = p.communicate() return output def convert_xyz_to_sdf(xyzfile, sdffile): shell(f'obabel -ixyz {xyzfile} -osdf -xf > {sdffile}', shell=True) return def convert_sdf_to_xyz(sdffile, xyzfile): shell(f'obabel -isdf {sdffile} -oxyz -xf > {xyzfile}', shell=True) return def get_bonds(sdf_file): isav = 0 atoms = 0 bond_list = [] searchlines = open(sdf_file, 'r').readlines() for i, line in enumerate(searchlines): words = line.split() #split line into words if len(words) < 1: continue if i == 3: atoms = int(words[0]) bonds = int(words[1]) if i > atoms+3 and i <= atoms+bonds+3: atom_1 = int(words[0]) atom_2 = int(words[1]) if atom_2 > atom_1: bond_list.append(tuple((atom_1,atom_2))) else: bond_list.append(tuple((atom_2,atom_1))) bond_list.sort() return bond_list def compare_sdf_structure(start, end): """ Returns True if structures are the same Return False if there has been a proton transfer """ bond_start = get_bonds(start) bond_end = get_bonds(end) return bond_start == bond_end def find_unique_confs(best_conformers, mol_files, threshold=0.5): """ Clustering conformers with RDKit's Butina algorithm to find unique conformer from a list of .sdf files using either heavy-atom root mean square deviation (RMSD) or heavy-atom torsion fingerprint deviation (TFD) """ rdkit_mol = next(rdmolfiles.ForwardSDMolSupplier(mol_files[0], sanitize=False, removeHs=True)) for mol_file in mol_files[1:]: mol = next(rdmolfiles.ForwardSDMolSupplier(mol_file, sanitize=False, removeHs=True)) rdkit_mol.AddConformer(mol.GetConformer(),assignId=True) # calculate difference matrix diffmat = AllChem.GetConformerRMSMatrix(rdkit_mol, prealigned=False) #threshold=0.5, sanitize=False, load AllChem # diffmat = TorsionFingerprints.GetTFDMatrix(rdkit_mol) #threshold=0.01, sanitize=True, load TorsionFingerprints # Cluster conformers num_confs = rdkit_mol.GetNumConformers() clt = Butina.ClusterData(diffmat, num_confs, threshold, isDistData=True, reordering=True) # Get unique conformers centroid_idx = [c[0] for c in clt] # centroid indexes. 
unique_best_conformers = [best_conformers[i] for i in centroid_idx] return unique_best_conformers if __name__ == "__main__": import sys print(compare_sdf_structure(sys.argv[1], sys.argv[2])) ``` #### File: RegioSQM20/regiosqm/molecule_svg.py ```python from rdkit import Chem from rdkit.Chem.Draw import rdMolDraw2D # from rdkit.Chem import rdDepictor # rdDepictor.SetPreferCoordGen(True) from reorder_atoms import get_atoms_in_order from collections import defaultdict # Drawing Options color_predicted = (0.2, 1, 0.0) # Green color_loseicted = (1.0, 0.1, 0.3) # Red color_measured = (0.0, 0.0, 0.0) # Black arad = 0.4 #0.25 #molsPerRow = 4 #change this in generate_structure() subImgSize = (300,300) def draw2d(mol, name, subImgSize, highlight_predicted, highlight_loseicted, measure=None): global color_predicted global color_loseicted global color_measured global arad d2d = rdMolDraw2D.MolDraw2DSVG(subImgSize[0], subImgSize[1]) d2d.SetFontSize(1) #atom label font size dos = d2d.drawOptions() dos.legendFontSize=23 #legend font size dos.atomHighlightsAreCircles = False dos.fillHighlights = True atomHighlighs = defaultdict(list) highlightRads = {} for idx in highlight_predicted: atomHighlighs[idx].append(color_predicted) highlightRads[idx] = arad # did threshold find some predictions? # find ones not in predicted list highlight_loseicted = list(set(highlight_loseicted)-set(highlight_predicted)) if len(highlight_loseicted): for idx in highlight_loseicted: atomHighlighs[idx].append(color_loseicted) highlightRads[idx] = arad if measure: for idx in measure: atomHighlighs[idx].append(color_measured) highlightRads[idx] = arad d2d.DrawMoleculeWithHighlights(mol, name, dict(atomHighlighs), {}, highlightRads, {}) d2d.FinishDrawing() return d2d.GetDrawingText() def generate_structure(ref_smi, smiles, names, predicted, highlight_measure=None): global subImgSize molsPerRow = 4 highlight_predicted, highlight_loseicted = predicted if names == None: names = ['' for i in range(len(smiles))] nRows = len(smiles) // molsPerRow if len(smiles) % molsPerRow: nRows += 1 if nRows == 1: molsPerRow = len(smiles) fullSize = (molsPerRow * subImgSize[0], nRows * subImgSize[1]) header = """<svg version='1.1' baseProfile='full' xmlns='http://www.w3.org/2000/svg' xmlns:rdkit='http://www.rdkit.org/xml' xmlns:xlink='http://www.w3.org/1999/xlink' xml:space='preserve' width='{0}px' height='{1}px' viewBox='0 0 {0} {1}'> <!-- END OF HEADER -->""".format(fullSize[0],fullSize[1]) spacer = '<g transform="translate({0},{1})">\n{2}</g>' ### Make sure the atoms are in order ### mols = [Chem.MolFromSmiles(smi) for smi in smiles] mols = get_atoms_in_order(Chem.MolFromSmiles(ref_smi), mols) cwidth = 0 cheight = 0 drawed_mols = [] for i in range(len(smiles)): res = draw2d(mols[i], names[i], subImgSize, highlight_predicted[i], highlight_loseicted[i], highlight_measure) res = res.split("\n") end_of_header = res.index("<!-- END OF HEADER -->") + 1 res = "\n".join(res[end_of_header:-2]) res = "".join(spacer.format(int(cwidth*subImgSize[0]), int(cheight*subImgSize[1]), res)) drawed_mols.append(res) if int(i+1) % molsPerRow == 0 and i != 0: cheight += 1 cwidth = 0 elif molsPerRow == 1: cheight += 1 cwidth = 0 else: cwidth += 1 svg = header + "\n" + "\n".join(drawed_mols) + "\n</svg>" return svg if __name__ == "__main__": ref_smi = 'c1c(c2cc(sc2)C)n[nH]c1' smiles = ['c1c(-c2cc(C)sc2)[nH]nc1', 'c1c(-c2cc(C)sc2)n[nH]c1'] names = ['taut1', 'taut2'] highlight_predicted = [[7,0], [10]] highlight_loseicted = [[7], [10]] highlight_measure = [0] result_svg = 
generate_structure(ref_smi, smiles, names, [highlight_predicted, highlight_loseicted], highlight_measure=highlight_measure) fd = open('test.svg','w') fd.write(result_svg) fd.close() ```
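`find_unique_confs()` above clusters conformers read from `.sdf` files. A minimal self-contained sketch of the same Butina workflow, embedding the conformers directly with RDKit instead of reading files — the molecule is illustrative and the 0.5 threshold matches the function's default:

```python
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.ML.Cluster import Butina

mol = Chem.AddHs(Chem.MolFromSmiles("CCCCO"))
cids = AllChem.EmbedMultipleConfs(mol, numConfs=20, randomSeed=42)

# Flattened lower-triangle RMS matrix between every pair of conformers
diffmat = AllChem.GetConformerRMSMatrix(mol, prealigned=False)

# Butina clustering; the first member of each cluster is its centroid,
# exactly the role centroid_idx plays in find_unique_confs() above
clusters = Butina.ClusterData(diffmat, mol.GetNumConformers(), 0.5,
                              isDistData=True, reordering=True)
unique_conf_ids = [c[0] for c in clusters]
print(f"{len(cids)} conformers -> {len(unique_conf_ids)} unique")
```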
{ "source": "jensengroup/String-GA", "score": 2 }
#### File: jensengroup/String-GA/string_scoring_functions.py ```python from rdkit import Chem import string_crossover as co import scoring_functions as sc def logP_max(string,dummy): mol = co.string2mol(string) score = sc.logP_score(mol) return max(0,score) def logP_target(string,args): mol = co.string2mol(string) score = sc.logP_target(mol,args) return score def rediscovery(string,args): mol = co.string2mol(string) score = sc.rediscovery(mol,args) return score def absorbance_target(string,args): mol = co.string2mol(string) score = sc.absorbance_target(mol,args) return score def calculate_scores(population,function,scoring_args): scores = [] for gene in population: score = function(gene,scoring_args) scores.append(score) return scores if __name__ == "__main__": co.average_size = 39.15 co.size_stdev = 3.50 Celecoxib = 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N' target = Chem.MolFromSmiles(Celecoxib) co.string_type = 'smiles' string = 'CCCCCCCC' score = logP_max(string,[]) score = rediscovery(string,[target]) print(score) ```
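`logP_max()` above delegates to `scoring_functions.logP_score`, which is defined elsewhere in the repo and may be a penalized logP rather than the raw value. A hedged standalone stand-in that only clamps plain Crippen logP at zero:

```python
from rdkit import Chem
from rdkit.Chem import Descriptors

def logP_max_standalone(smiles):
    """Clamp a logP-based score at zero, like logP_max() above.

    Plain Crippen MolLogP is used as a stand-in; scoring_functions.logP_score
    in the repo may apply additional penalties.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return 0.0
    return max(0.0, Descriptors.MolLogP(mol))

print(logP_max_standalone("CCCCCCCC"))  # octane -> a small positive score
```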
{ "source": "jensengroup/substituent_insulater_screening", "score": 2 }
#### File: jensengroup/substituent_insulater_screening/gemma_conf_search.py ```python import sys import numpy as np import pandas as pd from multiprocessing import Pool from rdkit import Chem from rdkit.Chem import AllChem sys.path.append("/home/koerstz/projects/gemma_part2/QMC_6.2") from qmmol import QMMol from qmconf import QMConf from calculator.xtb import xTB from conformers.create_conformers import RotatableBonds def gs_conformer_search(name, rdkit_conf, chrg, mult, cpus): """ ground state conformer search """ charged = False # hard coded for mogens # create conformers qmmol = QMMol() qmmol.add_conformer(rdkit_conf, fmt='rdkit', label=name, charged_fragments=charged, set_initial=True) #print(qmmol.conformers) #print(qmmol.conformers[0].write_xyz()) #quit() num_confs = 5 qmmol.create_random_conformers(threads=cpus, num_confs=num_confs) print(len(qmmol.conformers)) for conf in qmmol.conformers: print(conf.label) conf.write_xyz() quit() xtb_params = {'method': 'gfn2', 'opt': 'opt', 'cpus': 1} qmmol.calc = xTB(parameters=xtb_params) qmmol.optimize(num_procs=cpus, keep_files=True) #for conf in qmmol.conformers: # print(conf.label, conf.results['energy']) # conf.write_xyz() # Get most stable conformer. If most stable conformer # not identical to initial conf try second lowest. initial_smi = Chem.MolToSmiles(Chem.RemoveHs(qmmol.initial_conformer.get_rdkit_mol())) low_energy_conf = qmmol.nlowest(1)[0] try: conf_smi = Chem.MolToSmiles(Chem.RemoveHs(low_energy_conf.get_rdkit_mol())) except: conf_smi = 'fail' i = 1 while initial_smi != conf_smi: low_energy_conf = qmmol.nlowest(i+1)[-1] try: conf_smi = Chem.MolToSmiles(Chem.RemoveHs(low_energy_conf.get_rdkit_mol())) except: conf_smi = 'fail' i += 1 if len(qmmol.conformers) < i: sys.exit('no conformers match the initial input') return low_energy_conf def gs_gemma(tup): #name, smi, chrg, mult, cps): """GS conformers search given a smiles string """ cps = 1 name, smi, chrg, mult = tup.comp_name, tup.smiles, tup.charge, tup.multiplicity mol = Chem.AddHs(Chem.MolFromSmiles(smi)) AllChem.EmbedMolecule(mol) mol = Chem.AddHs(mol) Chem.MolToMolFile(mol, name + '.sdf') rdkit_conf = mol.GetConformer() qmconf = gs_conformer_search(name, rdkit_conf, chrg, mult, cps) #print(qmconf.results, qmconf.label) return qmconf if __name__ == '__main__': cpus = 2 data = pd.read_csv(sys.argv[1]) # find storage energy compound_list = list() for compound in data.itertuples(): mol = gs_gemma(compound) compound_list.append({'comp_name': compound.comp_name, 'mol': mol}) data_out = pd.DataFrame(compound_list) data_out.to_pickle(sys.argv[1].split('.')[0] + '.pkl') ```
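`gs_gemma()` above needs the in-house QMC package for the xTB conformer search, but its first step is ordinary RDKit embedding. A sketch of just that step (the name and SMILES are illustrative):

```python
from rdkit import Chem
from rdkit.Chem import AllChem

def embed_from_smiles(name, smi):
    """Build the 3D starting structure the way gs_gemma() does before
    handing the conformer to QMMol (QMC/xTB steps omitted here)."""
    mol = Chem.AddHs(Chem.MolFromSmiles(smi))
    AllChem.EmbedMolecule(mol)              # initial 3D coordinates
    Chem.MolToMolFile(mol, name + ".sdf")   # same side effect as gs_gemma
    return mol.GetConformer()

rdkit_conf = embed_from_smiles("azobenzene", "c1ccc(/N=N/c2ccccc2)cc1")
print(rdkit_conf.GetNumAtoms())
```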
{ "source": "jensengroup/take_elementary_step", "score": 3 }
#### File: jensengroup/take_elementary_step/rank_steps.py ```python import subprocess import re import os import operator import xyz2mol import read_xtbout_file as xtb import read_mopac_file as mopac from rdkit import Chem def shell(cmd, shell=False): if shell: p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: cmd = cmd.split() p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, err = p.communicate() return output def rank_by_energy(e_cut,base_name,entropy_correction,method): # The code will find all compounds with an energy less than the reactant plus all compounds with # an a higher energy but within "e_cut" kcal/mol. The molecules will be ranked with the lowest # energy compound first energy_ranked_molecules = [] au_to_kcal = 627.51 # File extention for GFN-xTB or MOPAC output files. Change if using something else if method == "xtb": file_names = shell('ls *.xtbout', shell=True).split("\n")[:-1] if method == "mopac": file_names = shell('ls *.out', shell=True).split("\n")[:-1] fragment_energies = {} for file_name in file_names: # Assumes GFN-xTB or MOPAC output files. You need to write your own parser if you use another program if method == "xtb": energy = xtb.get_energy(file_name) if method == "mopac": energy = mopac.get_energy(file_name) energy += entropy_correction fragment = file_name.split("+")[0] if fragment not in fragment_energies: fragment_energies[fragment] = [energy,file_name] elif energy < fragment_energies[fragment][0]: fragment_energies[fragment] = [energy,file_name] energies = {} for fragment in fragment_energies: name = fragment[:-1] energy = fragment_energies[fragment][0] file_name = fragment_energies[fragment][1] if name not in energies: energies[name] = [energy,file_name] else: energies[name][0] += energy energies[name][1] += ","+ file_name # The compund(s) labelled xxx0A,B, etc is assumed to be the starting structure used to create # the elementary steps, i.e. the reactant. for name in energies: if name.split(base_name)[1][:1] == "0": reactant_energy = energies[name][0] # The code will find all compounds with an energy less than the reactant plus all compounds with # an a higher energy but within "e_cut" kcal/mol. The molecules will be ranked with the lowest # energy compound first sorted_energies = sorted(energies.items(), key=operator.itemgetter(1)) # min_energy = sorted_energies[0][1][0] for name, (energy,file_name) in sorted_energies: e_diff = (energy - reactant_energy)*au_to_kcal if e_diff < e_cut: # print name, e_diff, file_name energy_ranked_molecules.append((name,round(e_diff,1),file_name)) return energy_ranked_molecules def files2mol(energy_ranked_files,charged_fragments,method): # the code assumes GFN-xTB or mopac output files. 
If you want to use another program add another parser smiles_list = [] molecules = [] for name,energy, file_name in energy_ranked_files: fragment_smiles_list = [] for fragment_file in file_name.split(","): try: if method == "xtb": charge,atomicNumList,xyz_coordinates = xtb.read_xtbout_file(fragment_file) if method == "mopac": charge,atomicNumList,xyz_coordinates = mopac.read_mopac_file(fragment_file) except: print fragment_file fragment_mol = xyz2mol.xyz2mol(atomicNumList,charge,xyz_coordinates,charged_fragments) fragment_smiles = Chem.MolToSmiles(fragment_mol) fragment_smiles_list.append(fragment_smiles) smiles = ".".join(fragment_smiles_list) if smiles not in smiles_list: molecules.append((name, smiles, energy, file_name)) smiles_list.append(smiles) return molecules if __name__ == "__main__": # The compund(s) labelled xxx0A,B, etc is assumed to be the starting structure used to create # the elementary steps, i.e. the reactant. # A, B, etc refer to fragments and the energies are combined. Example: the reactant is # CH4 + H2O, so xxx0A and xxx0B refer to CH4 and H2O, respectively and the energy of xxx0 # is the sum of these two energies. # The code will find all compounds with an energy less than the reactant plus all compounds with # an a higher energy but within "e_cut" kcal/mol. The molecules will be ranked with the lowest # energy compound first e_cut = 20. #kcal/mol # Rough estimate of the translational entropy correction -TS at 298K which is added to the energy. # This is important when comparing the energy of one molecule to that of two, e.g. CH3OH vs CH3O + H # If you are using free energies to rank your compounds this should be set to 0. au_to_kcal = 627.51 entropy_correction = -10./au_to_kcal # Make charged fragments, e.g. CH3O- + H+ instead of CH3O + H charged_fragments = False # It is assumed that the directory name and file names are same, e.g. directory0A for the reactant directory = "diels_alder" #directory = "ROOR" os.chdir(directory) # pick the method you want to use. 
The choices are currently GFN-xTB and MOPAC #method = "xtb" method = "mopac" energy_ranked_files = rank_by_energy(e_cut,directory,entropy_correction,method) energy_ranked_molecules = files2mol(energy_ranked_files,charged_fragments,method) for name, smiles, energy, file_name in energy_ranked_molecules: print name, smiles, energy, file_name ``` #### File: jensengroup/take_elementary_step/read_mopac_file.py ```python import subprocess import re import sys import xyz2mol def shell(cmd, shell=False): if shell: p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: cmd = cmd.split() p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, err = p.communicate() return output def get_energy(mopac_out): au_to_kcal = 627.51 line = shell('grep --text "HEAT OF FORMATION" '+mopac_out+' | tail -1', shell=True) energy = re.findall("[-\d]+\.\d+", line) if len(energy) != 0: energy = energy[0] energy = float(energy) energy = energy/au_to_kcal else: line = shell('grep --text "HEAT" '+mopac_out+' | tail -1', shell=True) energy = re.findall("[-\d]+\.\d+", line) if len(energy) != 0: energy = energy[-1] energy = float(energy) energy = energy/au_to_kcal else: energy = 999999.0 return energy def read_mopac_file(mopac_out): xyz_coordinates = [] atomic_symbols = [] line = shell('grep "CHARGE ON SYSTEM" '+mopac_out, shell=True) charge = re.findall(r"[-+]?\d*\.\d+|\d+", line)[0] charge = int(charge) line = shell('grep "Empirical Formula:" '+mopac_out, shell=True) number_of_atoms = re.findall(r"[-+]?\d*\.\d+|\d+", line)[-1] number_of_atoms = int(number_of_atoms) special_case_test = shell('grep "CURRENT VALUE OF GRADIENT NORM" '+mopac_out, shell=True) special_case = len(special_case_test) > 0 if special_case: lines = shell('grep -A'+str(number_of_atoms+7)+' "CURRENT VALUE OF GEOMETRY" '+mopac_out+' | tail -'+str(number_of_atoms+7), shell=True) lines = lines.split("\n") for i in range(4,number_of_atoms+4): symbol = re.findall(r'\b\w+\b', lines[i])[0] atomic_symbols.append(symbol) xyz = re.findall(r"[-+]?\d*\.\d+|\d+", lines[i]) xyz.pop(1) xyz.pop(2) xyz.pop(3) xyz = [float(j) for j in xyz] #print xyz xyz_coordinates.append(xyz) else: lines = shell('grep -A'+str(number_of_atoms+1)+' "CARTESIAN COORDINATES" '+mopac_out+' | tail -'+str(number_of_atoms), shell=True) lines = lines.split("\n") for i in range(number_of_atoms): #print re.findall(r"[A-Za-z]", lines[i]) symbol = re.findall(r'\b\w+\b', lines[i])[1] atomic_symbols.append(symbol) xyz = re.findall(r"[-+]?\d*\.\d+|\d+", lines[i]) xyz.pop(0) xyz = [float(j) for j in xyz] #print xyz xyz_coordinates.append(xyz) atomicNumList = xyz2mol.get_atomicNumList(atomic_symbols) return charge,atomicNumList,xyz_coordinates if __name__ == "__main__": #name = "finalheat.out" #name = "currentbest.out" name = "currentvalue.out" energy = get_energy(name) #print energy charge,atomicNumList,xyz_coordinates = read_mopac_file(name) ``` #### File: jensengroup/take_elementary_step/write_input_files.py ```python from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import rdMolDescriptors def get_fragments(mol,name): fragment_names = [] fragments = Chem.GetMolFrags(mol,asMols=True) labels = ["A","B","C"] for label,fragment in zip(labels,fragments): fragment_names.append(name+label) return fragments, fragment_names def generate_conformations(fragments, max_confs=20): for fragment in fragments: rot_bond = rdMolDescriptors.CalcNumRotatableBonds(fragment) confs = min(3 + 
3*rot_bond,max_confs) AllChem.EmbedMultipleConfs(fragment,numConfs=confs) return fragments def write_mopac_input_file(fragment, fragment_name, keywords): number_of_atoms = fragment.GetNumAtoms() charge = Chem.GetFormalCharge(fragment) symbols = [a.GetSymbol() for a in fragment.GetAtoms()] for i,conf in enumerate(fragment.GetConformers()): file_name = fragment_name+"+"+str(i)+".mop" with open(file_name, "w") as file: file.write(keywords+str(charge)+"\n") file.write(" \n") file.write(" \n") for atom,symbol in enumerate(symbols): p = conf.GetAtomPosition(atom) line = " ".join((symbol,str(p.x),"1",str(p.y),"1",str(p.z),"1","\n")) file.write(line) def write_xtb_input_file(fragment, fragment_name): number_of_atoms = fragment.GetNumAtoms() charge = Chem.GetFormalCharge(fragment) symbols = [a.GetSymbol() for a in fragment.GetAtoms()] for i,conf in enumerate(fragment.GetConformers()): file_name = fragment_name+"+"+str(i)+".xyz" with open(file_name, "w") as file: file.write(str(number_of_atoms)+"\n") file.write("title\n") for atom,symbol in enumerate(symbols): p = conf.GetAtomPosition(atom) line = " ".join((symbol,str(p.x),str(p.y),str(p.z),"\n")) file.write(line) if charge !=0: file.write("$set\n") file.write("chrg "+str(charge)+"\n") file.write("$end") # GFN-xTB and MOPAC automatically switches to UHF if the number of electrons is odd, so there is no need # to specify the multiplicity. # If you need to do that for another program you can compute the number of electrons by # atomic_numbers = [a.GetAtomicNum() for a in fragment.GetAtoms()] # number_of_electrons = sum(atomic_numbers) - charge def write_input_files(mol,name,method,keywords): # This version writes input files for GFN-xTB and MOPAC. If you want to use another program then replace # add additional functions fragments, fragment_names = get_fragments(mol,name) fragments = generate_conformations(fragments) for fragment, fragment_name in zip(fragments, fragment_names): if method == "xtb": write_xtb_input_file(fragment, fragment_name) if method == "mopac": write_mopac_input_file(fragment, fragment_name,keywords) if __name__ == "__main__": smiles = "CCC.CC" name = "test" #method = "xtb" #keywords = "" method = "mopac" keywords = "pm3 cycles=200 charge=" mol = Chem.MolFromSmiles(smiles) mol = Chem.AddHs(mol) write_input_files(mol,name,method,keywords) ```
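As a self-contained companion to `write_xtb_input_file()` above, this sketch writes each embedded conformer to a plain `.xyz` file with the same layout; the molecule, filenames, and neutral-charge assumption are illustrative:

```python
from rdkit import Chem
from rdkit.Chem import AllChem

def conformer_to_xyz(mol, conf_id, path):
    """Write one RDKit conformer as a plain .xyz file, following the layout
    write_xtb_input_file() produces above (neutral molecule assumed)."""
    conf = mol.GetConformer(conf_id)
    with open(path, "w") as f:
        f.write(f"{mol.GetNumAtoms()}\n")
        f.write("title\n")
        for atom in mol.GetAtoms():
            p = conf.GetAtomPosition(atom.GetIdx())
            f.write(f"{atom.GetSymbol()} {p.x} {p.y} {p.z}\n")

mol = Chem.AddHs(Chem.MolFromSmiles("CCO"))
AllChem.EmbedMultipleConfs(mol, numConfs=3)
for cid in range(mol.GetNumConformers()):
    conformer_to_xyz(mol, cid, f"ethanol+{cid}.xyz")
```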
{ "source": "jensengroup/tstools", "score": 2 }
#### File: tstools/tstools/embed.py ```python import logging import copy from itertools import product import numpy as np from rdkit import Chem from rdkit.Chem import ( AllChem, rdDistGeom, rdMolTransforms, rdmolops, ) from rdkit.Geometry import Point3D from tstools.utils import get_fragments _logger = logging.getLogger("embed") def reassign_atom_idx(mol): """Reassigns the RDKit mol object atomid to atom mapped id""" renumber = [(atom.GetIdx(), atom.GetAtomMapNum()) for atom in mol.GetAtoms()] new_idx = [idx[0] for idx in sorted(renumber, key=lambda x: x[1])] mol = Chem.RenumberAtoms(mol, new_idx) rdmolops.AssignStereochemistry(mol, force=True) return mol class EmbedError(Exception): pass def embed_fragment(molobj: Chem.Mol, seed: int = 31) -> Chem.Mol: """Embed fragment/simple molecule""" rdmol = copy.deepcopy(molobj) try: rdDistGeom.EmbedMolecule( rdmol, useRandomCoords=False, randomSeed=seed, maxAttempts=10_000, ETversion=1, ) except: raise EmbedError("RDKit Failed to embed Molecule.") Chem.SanitizeMol(rdmol) AllChem.UFFOptimizeMolecule(rdmol) return rdmol def embed_fragments(molobjs: list[Chem.Mol], seed: int = 31) -> list[Chem.Mol]: """Embed list of mol objs""" embedded_molobjs = [] for molobj in molobjs: embedded_molobjs.append(embed_fragment(molobj, seed=seed)) return embedded_molobjs def _sort_fragments_size(fragments): """ """ frag_size = [frag.GetNumAtoms() for frag in fragments] sorted_frags = sorted( list(zip(fragments, frag_size)), key=lambda x: x[1], reverse=True ) return [x[0] for x in sorted_frags] def center_fragments(molobjs: list[Chem.Mol]) -> None: """ """ for molobj in molobjs: confobj = molobj.GetConformer() centroid_point = rdMolTransforms.ComputeCentroid(confobj) for i in range(molobj.GetNumAtoms()): orginal_point = confobj.GetAtomPosition(i) confobj.SetAtomPosition(i, orginal_point - centroid_point) def _translate_fragment(molobj: Chem.Mol, direction: list[float]): """ """ confobj = molobj.GetConformer() for i in range(confobj.GetNumAtoms()): atom_pos = confobj.GetAtomPosition(i) atom_pos += direction confobj.SetAtomPosition(i, atom_pos) return molobj def _check_embedded_fragments(new_molobj, old_molobj, cutoff=1.5): """ Check if atoms of two molecules are too close. """ new_fragment = new_molobj.GetConformer().GetPositions() old_fragment = old_molobj.GetConformer().GetPositions() for i, j in product(range(old_fragment.shape[0]), range(new_fragment.shape[0])): if i >= j: if np.linalg.norm(new_fragment[j] - old_fragment[i]) < cutoff: return False return True def simple_embed_and_translate( molobj: Chem.Mol, seed: int = 31, direction: list[float] = [0.4, 0.0, 0.0] ): """ Embed fragments, and move fragment in `direction`. If more than 2 fragments it just embed on a line. """ molobjs = get_fragments(molobj) frag_molobjs = embed_fragments(molobjs, seed=seed) center_fragments(frag_molobjs) max_fragment_size = frag_molobjs[0].GetNumAtoms() direction = Point3D(*direction) for fragid, fragment_molobj in enumerate(frag_molobjs): if fragid == 0: merged_molobj = copy.deepcopy(fragment_molobj) continue fragment_molobj = _translate_fragment( fragment_molobj, direction * max_fragment_size ) merged_molobj = Chem.CombineMols(merged_molobj, fragment_molobj) max_fragment_size += fragment_molobj.GetNumAtoms() return reassign_atom_idx(merged_molobj) def random_embedding( molobj: Chem.Mol, seed: int = 42, translation_distance: float = 4.0, distance_cutoff=2.0, max_attempts=10, ): """Embed similarly to ChemDyME: <NAME>, <NAME>, <NAME>, <NAME>, arXiv [physics.chem-ph] 2021. 
http://arxiv.org/abs/2104.02389 """ np.random.seed(seed=seed) molobjs = get_fragments(molobj) frag_molobjs = embed_fragments(molobjs, seed=seed) center_fragments(frag_molobjs) for fragid, fragment_molobj in enumerate(frag_molobjs): if fragid == 0: merged_molobj = copy.deepcopy(fragment_molobj) continue # Keep making a random embedding until fragments are # far enough way from eavh other. count = 0 while ( _check_embedded_fragments( fragment_molobj, merged_molobj, cutoff=distance_cutoff ) is False ): random_vector = np.random.rand(3) unit_vector = random_vector / np.linalg.norm(random_vector) fragment_molobj = _translate_fragment( fragment_molobj, Point3D(*(translation_distance * unit_vector)) ) count += 1 if count == max_attempts: _logger.critical("Can't embbed molecule") return None merged_molobj = Chem.CombineMols(merged_molobj, fragment_molobj) return reassign_atom_idx(merged_molobj) ``` #### File: tstools/tstools/refine_tasks.py ```python import logging import numpy as np from rdkit import Chem from rdkit.Chem import rdForceFieldHelpers, rdMolAlign from rdkit.ForceField import rdForceField _logger = logging.getLogger() def interpolate_structures( path_points: list[np.ndarray], atom_symbols, n_points = 20, calculator = None ): """ """ if len(path_points) != 2: raise RuntimeError('Can only interpolate between two structures') path_m1, path_p1 = path_points n_atoms = len(atom_symbols) difference_mat = path_m1 - path_p1 interpolated_coords = np.zeros((n_points, n_atoms, 3)) interpolated_energies = np.zeros(n_atoms) for i in range(n_points + 1): interpolated_coords[i - 1] = path_p1 + i / n_points * difference_mat if calculator is None: return interpolated_coords, interpolated_energies for i, coords in enumerate(interpolated_coords): energies = calculator(atom_symbols, coords, namespace="interpolate")['energy'] interpolated_energies[i] = energies['elec_energy'] return interpolated_coords, interpolated_energies def refine_uff_dist_constrained( molobj, bond_idx, min_distance=4.0, max_distance=6.0, use_vdW=False, # TODO: Set vdw force_constant=1.0, opt_max_iters=1_000, opt_force_tol=1e-4, opt_energy_tol=1e-6, ): """ Refines embedded molobj by performing a constrained UFF optimization. The distance constraints between `bond_idx` makes sure that the fragments are but in somewhat the correct position. """ if not rdForceFieldHelpers.UFFHasAllMoleculeParams(molobj): raise NotImplementedError("UFF doesn't have parameters!") Chem.SanitizeMol(molobj) ff = rdForceFieldHelpers.UFFGetMoleculeForceField( molobj, ignoreInterfragInteractions=False ) for atom_i, atom_j in bond_idx: ff.UFFAddDistanceConstraint( int(atom_i), int(atom_j), False, float(min_distance), float(max_distance), float(force_constant), ) ff.Initialize() exit_msg = ff.Minimize( maxIts=opt_max_iters, forceTol=opt_force_tol, energyTol=opt_energy_tol ) if exit_msg != 0: _logger.warning("Constrained FF optimization didn't converge.") return molobj def refine_uff_constrained_embed(reactant, product, force_constant=100.0): """ Refines the reactant or product similarly to RDKit's contained embed. Tries to fit `mol` to the core through a heavily constrained FF optimization. The core is the molecule (reactant or product) with the most bonds. 
""" if not rdForceFieldHelpers.UFFHasAllMoleculeParams(reactant): raise NotImplementedError("UFF doesn't have reactant parameters!") if not rdForceFieldHelpers.UFFHasAllMoleculeParams(product): raise NotImplementedError("UFF doesn't have product parameters!") n_bonds_reactant = reactant.GetNumBonds() n_bonds_product = product.GetNumBonds() if n_bonds_reactant >= n_bonds_product: reac_is_core = True mol, core = product, reactant else: reac_is_core = False mol, core = reactant, product coreConf = core.GetConformer() ff = rdForceFieldHelpers.UFFGetMoleculeForceField( mol, ignoreInterfragInteractions=False ) for i in range(core.GetNumAtoms()): for j in range(i + 1, core.GetNumAtoms()): corePtI = coreConf.GetAtomPosition(i) corePtJ = coreConf.GetAtomPosition(j) d = corePtI.Distance(corePtJ) ff.AddDistanceConstraint(i, j, d, d, force_constant) ff.Initialize() n = 4 more = ff.Minimize() while more and n: ff.Minimize() n -= 1 if reac_is_core: return core, mol else: return mol, core ```
{ "source": "jensens/libvcs", "score": 2 }
#### File: libvcs/libvcs/exc.py ```python class LibVCSException(Exception): """Standard exception raised by libvcs.""" pass class CommandError(LibVCSException): """This exception is raised on non-zero return codes.""" def __init__(self, output, returncode=None, cmd=None): self.returncode = returncode self.output = output if cmd: if isinstance(cmd, list): cmd = ' '.join(cmd) self.cmd = cmd def __str__(self): message = self.message.format(returncode=self.returncode, cmd=self.cmd) if len(self.output.strip()): message += '\n%s' % self.output return message message = 'Command failed with code {returncode}: {cmd}' class CommandTimeoutError(CommandError): """CommandError which gets raised when a subprocess exceeds its timeout.""" pass class InvalidPipURL(LibVCSException): """Invalid pip-style URL.""" def __init__(self, url): self.url = url super(InvalidPipURL, self).__init__() def __str__(self): return self.message message = ( 'Repo URL %s requires a vcs scheme. Prepend the vcs (hg+, git+, svn+)' 'to the repo URL. e.g: git+https://github.com/freebsd/freebsd.git' ) class InvalidVCS(LibVCSException): """Invalid VCS.""" pass ``` #### File: libvcs/tests/test_hg.py ```python import os import pytest from libvcs.shortcuts import create_repo_from_pip_url from libvcs.util import run, which if not which('hg'): pytestmark = pytest.mark.skip(reason="hg is not available") @pytest.fixture def hg_remote(parentdir, scope='session'): """Create a git repo with 1 commit, used as a remote.""" name = 'dummyrepo' repo_path = str(parentdir.join(name)) run(['hg', 'init', name], cwd=str(parentdir)) testfile_filename = 'testfile.test' run(['touch', testfile_filename], cwd=repo_path) run(['hg', 'add', testfile_filename], cwd=repo_path) run(['hg', 'commit', '-m', 'test file for %s' % name], cwd=repo_path) return repo_path def test_repo_mercurial(tmpdir, parentdir, hg_remote): repo_name = 'my_mercurial_project' mercurial_repo = create_repo_from_pip_url( **{ 'pip_url': 'hg+file://' + hg_remote, 'repo_dir': str(parentdir.join(repo_name)), } ) run(['hg', 'init', mercurial_repo.repo_name], cwd=str(tmpdir)) mercurial_repo.update_repo() test_repo_revision = run( ['hg', 'parents', '--template={rev}'], cwd=str(parentdir.join(repo_name)) ) assert mercurial_repo.get_revision() == test_repo_revision assert os.path.exists(str(tmpdir.join(repo_name))) ```
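A small usage sketch for `CommandError` from `exc.py` above, showing how the message is assembled; the output, return code, and command are made-up values:

```python
from libvcs.exc import CommandError

# __str__ interpolates the return code and the joined command, then appends
# the captured output on a new line
err = CommandError(output="fatal: not a git repository",
                   returncode=128,
                   cmd=["git", "status"])
print(err)
```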
{ "source": "jensens/melange", "score": 3 }
#### File: src/melange/fragment.py ```python from melange.exceptions import FragmentError import json import lxml.html import UserDict class MelangeFragment(object): """Single Melange Fragment """ def __init__(self, raw): self._manifest = None self.raw = raw self.html = lxml.html.fromstring(raw) self.manifest # initialize and minor validation @property def manifest(self): """reads manifest from fragment once and caches """ if self._manifest is None: for element in self.html.xpath('//*[@data-melange-manifest]'): # take first raw = element.attrib['data-melange-manifest'] self._manifest = json.loads(raw) break if self._manifest is None: raise FragmentError('No manifest found.') if 'name' not in self._manifest: raise FragmentError('Manifest must contain a "name".\n' + raw) return self._manifest @property def name(self): return self.manifest['name'] def __str__(self): return lxml.html.tostring(self.html) class FragmentRegistry(UserDict.IterableUserDict): """registry for MelangeFragments """ def register(self, raw): fragment = MelangeFragment(raw) self.data[fragment.name] = fragment def __add__(self, raw): self.register(raw) return self fragment_registry = FragmentRegistry() ```
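A usage sketch for the fragment registry above (the module is Python-2 era, given `UserDict.IterableUserDict`); the HTML snippet and manifest name are illustrative:

```python
from melange.fragment import fragment_registry

raw = """
<div data-melange-manifest='{"name": "teaser"}'>
  <h2>Teaser fragment</h2>
</div>
"""

fragment_registry.register(raw)   # __add__ also works: fragment_registry += raw
teaser = fragment_registry["teaser"]
print(teaser.name)        # -> teaser
print(teaser.manifest)    # -> parsed manifest dict
```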
{ "source": "JensGck/platform", "score": 2 }
#### File: DevOps/Locust/locustfile.py ```python import os import sys import time from locust import FastHttpUser, task, between, constant,tag from bs4 import BeautifulSoup sys.path.append(os.path.dirname(__file__) + '/..') from common.storefront import Storefront from common.context import Context from common.api import Api context = Context() class Erp(FastHttpUser): fixed_count = 1 def on_start(self): self.api = Api(self.client, context) @task def call_api(self): if (context.erp == False): return self.api.update_prices() self.api.update_stock() class Visitor(FastHttpUser): wait_time = between(2, 5) weight = 10 @task(3) def listing(self): page = Storefront(self.client, context) page.go_to_listing() page.view_products(2) page.go_to_listing() if (context.allow_filter == True): page.add_manufacturer_filter() page.select_sorting() page.go_to_next_page() page.view_products(3) page.go_to_listing() if (context.allow_filter == True): page.add_property_filter() page.go_to_next_page() page.view_products(2) @task(1) def search(self): page = Storefront(self.client, context) page.do_search() page.view_products(2) page.go_to_next_page() page.view_products(2) page.go_to_next_page() if (context.allow_filter == True): page.add_manufacturer_filter() page.select_sorting() page.view_products(3) class Surfer(FastHttpUser): wait_time = between(2, 5) weight = 6 @task def surf(self): page = Storefront(self.client, context) # search products over listings page.go_to_listing() # take a look to the first two products page.view_products(2) page.go_to_next_page() # open two different product pages page.view_products(2) # sort listing and use properties to filter page.select_sorting() if (context.allow_filter == True): page.add_property_filter() page.view_products(1) page.go_to_next_page() # switch to search to find products page.do_search() page.view_products(2) # use property filter to find products if (context.allow_filter == True): page.add_property_filter() # take a look to the top three hits page.view_products(3) page.go_to_next_page() class SurfWithOrder(FastHttpUser): wait_time = between(2, 5) weight = 6 @task def surf(self): page = Storefront(self.client, context) page.register() #instead of login, we register page.browse_account() # search products over listings page.go_to_listing() # take a look to the first two products page.view_products(2) page.add_product_to_cart() page.go_to_next_page() # open two different product pages page.view_products(2) page.add_product_to_cart() # sort listing and use properties to filter page.select_sorting() if (context.allow_filter == True): page.add_property_filter() page.view_products(1) page.go_to_next_page() page.add_product_to_cart() page.instant_order() # switch to search to find products page.do_search() page.view_products(2) if (context.allow_filter == True): # use property filter to find products page.add_property_filter() # take a look to the top three hits page.view_products(3) page.add_product_to_cart() page.add_product_to_cart() page.go_to_next_page() page.view_products(2) page.add_product_to_cart() page.add_product_to_cart() page.add_product_to_cart() page.instant_order() page.logout() class FastOrder(FastHttpUser): weight = 4 def on_start(self): self.page = Storefront(self.client, context) self.page.register() self.page.logout() @task def order(self): self.page.login() self.page.add_products_to_cart(3) self.page.instant_order() self.page.logout() class Nvidia(FastHttpUser): weight = 2 @task def follow_advertisement(self): page = Storefront(self.client, 
context) page.register() page.add_advertisement() page.instant_order() page.logout() ```
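For comparison with the shop-specific user classes above, a minimal Locust user sketch; the host URL and run parameters in the trailing comment are placeholders:

```python
from locust import FastHttpUser, task, between

class QuickBrowser(FastHttpUser):
    """Minimal user class; `weight` controls how often Locust spawns it
    relative to the other user classes defined in the same locustfile."""
    weight = 3
    wait_time = between(1, 3)

    @task
    def homepage(self):
        self.client.get("/")

# Example headless run (host and user counts are placeholders):
#   locust -f locustfile.py --host http://localhost:8000 --headless -u 50 -r 5
```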
{ "source": "jensgk/optbinning", "score": 2 }
#### File: optbinning/binning/binning_information.py ```python from ..information import print_header from ..information import print_optional_parameters from ..information import print_solver_statistics from ..options import continuous_optimal_binning_default_options from ..options import multiclass_optimal_binning_default_options from ..options import optimal_binning_default_options from ..options import sboptimal_binning_default_options from ..options import optimal_binning_2d_default_options def print_prebinning_statistics(n_prebins, n_refinement): prebinning_stats = ( " Pre-binning statistics\n" " Number of pre-bins {:>10}\n" " Number of refinements {:>10}\n" ).format(n_prebins, n_refinement) print(prebinning_stats) def print_timing(solver_type, solver, time_total, time_preprocessing, time_prebinning, time_solver, time_optimizer, time_postprocessing): p_preprocessing = time_preprocessing / time_total p_prebinning = time_prebinning / time_total p_solver = time_solver / time_total p_postprocessing = time_postprocessing / time_total if solver_type == "cp" and solver is not None: time_model_generation = time_solver - time_optimizer p_model_generation = time_model_generation / time_solver p_optimizer = time_optimizer / time_solver time_stats = ( " Timing\n" " Total time {:>18.2f} sec\n" " Pre-processing {:>18.2f} sec ({:>7.2%})\n" " Pre-binning {:>18.2f} sec ({:>7.2%})\n" " Solver {:>18.2f} sec ({:>7.2%})\n" " model generation {:>18.2f} sec ({:>7.2%})\n" " optimizer {:>18.2f} sec ({:>7.2%})\n" " Post-processing {:>18.2f} sec ({:>7.2%})\n" ).format(time_total, time_preprocessing, p_preprocessing, time_prebinning, p_prebinning, time_solver, p_solver, time_model_generation, p_model_generation, time_optimizer, p_optimizer, time_postprocessing, p_postprocessing) else: time_stats = ( " Timing\n" " Total time {:>18.2f} sec\n" " Pre-processing {:>18.2f} sec ({:>7.2%})\n" " Pre-binning {:>18.2f} sec ({:>7.2%})\n" " Solver {:>18.2f} sec ({:>7.2%})\n" " Post-processing {:>18.2f} sec ({:>7.2%})\n" ).format(time_total, time_preprocessing, p_preprocessing, time_prebinning, p_prebinning, time_solver, p_solver, time_postprocessing, p_postprocessing) print(time_stats) def print_name_status(name, status): if not name: name = "UNKNOWN" print(" Name : {:<32}\n" " Status : {:<32}\n".format(name, status)) def print_main_info(name, status, time_total): print_name_status(name, status) print(" Time : {:<7.4f} sec\n".format(time_total)) def print_binning_information(binning_type, print_level, name, status, solver_type, solver, time_total, time_preprocessing, time_prebinning, time_solver, time_optimizer, time_postprocessing, n_prebins, n_refinements, dict_user_options): print_header() if print_level == 2: if binning_type == "optimalbinning": dict_default_options = optimal_binning_default_options elif binning_type == "multiclassoptimalbinning": dict_default_options = multiclass_optimal_binning_default_options elif binning_type == "continuousoptimalbinning": dict_default_options = continuous_optimal_binning_default_options elif binning_type == "sboptimalbinning": dict_default_options = sboptimal_binning_default_options elif binning_type == "optimalbinning2d": dict_default_options = optimal_binning_2d_default_options print_optional_parameters(dict_default_options, dict_user_options) if print_level == 0: print_main_info(name, status, time_total) elif print_level >= 1: print_name_status(name, status) print_prebinning_statistics(n_prebins, n_refinements) if status in ("OPTIMAL", "FEASIBLE"): if solver is not None: 
print_solver_statistics(solver_type, solver) print_timing(solver_type, solver, time_total, time_preprocessing, time_prebinning, time_solver, time_optimizer, time_postprocessing) ``` #### File: optbinning/binning/binning_process.py ```python import numbers import time from warnings import warn import dill import numpy as np import pandas as pd from joblib import Parallel, delayed, effective_n_jobs from sklearn.base import BaseEstimator from sklearn.exceptions import NotFittedError from sklearn.utils import check_array from sklearn.utils import check_consistent_length from sklearn.utils.multiclass import type_of_target from ..logging import Logger from .base import Base from .binning import OptimalBinning from .binning_process_information import print_binning_process_information from .continuous_binning import ContinuousOptimalBinning from .multiclass_binning import MulticlassOptimalBinning from .piecewise.binning import OptimalPWBinning from .piecewise.continuous_binning import ContinuousOptimalPWBinning logger = Logger(__name__).logger _METRICS = { "binary": { "metrics": ["iv", "js", "gini", "quality_score"], "iv": {"min": 0, "max": np.inf}, "gini": {"min": 0, "max": 1}, "js": {"min": 0, "max": np.inf}, "quality_score": {"min": 0, "max": 1} }, "multiclass": { "metrics": ["js", "quality_score"], "js": {"min": 0, "max": np.inf}, "quality_score": {"min": 0, "max": 1} }, "continuous": { "metrics": [] } } _OPTB_TYPES = (OptimalBinning, ContinuousOptimalBinning, MulticlassOptimalBinning) _OPTBPW_TYPES = (OptimalPWBinning, ContinuousOptimalPWBinning) def _read_column(input_path, extension, column, **kwargs): if extension == "csv": x = pd.read_csv(input_path, engine='c', usecols=[column], low_memory=False, memory_map=True, **kwargs) elif extension == "parquet": x = pd.read_parquet(input_path, columns=[column], **kwargs) return x.iloc[:, 0].values def _fit_variable(x, y, name, target_dtype, categorical_variables, binning_fit_params, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_pvalue, max_pvalue_policy, special_codes, split_digits, sample_weight=None): params = {} dtype = _check_variable_dtype(x) if categorical_variables is not None: if name in categorical_variables: dtype = "categorical" if binning_fit_params is not None: params = binning_fit_params.get(name, {}) if target_dtype == "binary": optb = OptimalBinning( name=name, dtype=dtype, max_n_prebins=max_n_prebins, min_prebin_size=min_prebin_size, min_n_bins=min_n_bins, max_n_bins=max_n_bins, min_bin_size=min_bin_size, max_pvalue=max_pvalue, max_pvalue_policy=max_pvalue_policy, special_codes=special_codes, split_digits=split_digits) elif target_dtype == "continuous": optb = ContinuousOptimalBinning( name=name, dtype=dtype, max_n_prebins=max_n_prebins, min_prebin_size=min_prebin_size, min_n_bins=min_n_bins, max_n_bins=max_n_bins, min_bin_size=min_bin_size, max_pvalue=max_pvalue, max_pvalue_policy=max_pvalue_policy, special_codes=special_codes, split_digits=split_digits) else: if dtype == "categorical": raise ValueError("MulticlassOptimalBinning does not support " "categorical variables.") optb = MulticlassOptimalBinning( name=name, max_n_prebins=max_n_prebins, min_prebin_size=min_prebin_size, min_n_bins=min_n_bins, max_n_bins=max_n_bins, min_bin_size=min_bin_size, max_pvalue=max_pvalue, max_pvalue_policy=max_pvalue_policy, special_codes=special_codes, split_digits=split_digits) optb.set_params(**params) if target_dtype == "binary": optb.fit(x, y, sample_weight) else: optb.fit(x, y) return dtype, optb def _fit_block(X, 
y, names, target_dtype, categorical_variables, binning_fit_params, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_pvalue, max_pvalue_policy, special_codes, split_digits, sample_weight=None): variable_dtypes = {} binned_variables = {} for i, name in enumerate(names): if isinstance(X, np.ndarray): dtype, optb = _fit_variable( X[:, i], y, name, target_dtype, categorical_variables, binning_fit_params, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_pvalue, max_pvalue_policy, special_codes, split_digits, sample_weight) else: dtype, optb = _fit_variable( X[name], y, name, target_dtype, categorical_variables, binning_fit_params, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_pvalue, max_pvalue_policy, special_codes, split_digits, sample_weight) variable_dtypes[name] = dtype binned_variables[name] = optb return variable_dtypes, binned_variables def _check_selection_criteria(selection_criteria, target_dtype): default_metrics_info = _METRICS[target_dtype] default_metrics = default_metrics_info["metrics"] if not all(m in default_metrics for m in selection_criteria.keys()): raise ValueError("metric for {} target must be in {}." .format(target_dtype, default_metrics)) for metric, info in selection_criteria.items(): if not isinstance(info, dict): raise TypeError("metric {} info is not a dict.".format(metric)) for key, value in info.items(): if key == "min": min_ref = default_metrics_info[metric][key] if value < min_ref: raise ValueError("metric {} min value {} < {}." .format(metric, value, min_ref)) elif key == "max": max_ref = default_metrics_info[metric][key] if value > max_ref: raise ValueError("metric {} max value {} > {}." .format(metric, value, max_ref)) elif key == "strategy": if value not in ("highest", "lowest"): raise ValueError('strategy value for metric {} must be ' '"highest" or "lowest"; got {}.' .format(value, metric)) elif key == "top": if isinstance(value, numbers.Integral): if value < 1: raise ValueError("top value must be at least 1 or " "in (0, 1); got {}.".format(value)) else: if not 0. < value < 1.: raise ValueError("top value must be at least 1 or " "in (0, 1); got {}.".format(value)) else: raise KeyError(key) def _check_parameters(variable_names, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_bin_size, max_pvalue, max_pvalue_policy, selection_criteria, fixed_variables, categorical_variables, special_codes, split_digits, binning_fit_params, binning_transform_params, n_jobs, verbose): if not isinstance(variable_names, (np.ndarray, list)): raise TypeError("variable_names must be a list or numpy.ndarray.") if not isinstance(max_n_prebins, numbers.Integral) or max_n_prebins <= 1: raise ValueError("max_prebins must be an integer greater than 1; " "got {}.".format(max_n_prebins)) if not 0. < min_prebin_size <= 0.5: raise ValueError("min_prebin_size must be in (0, 0.5]; got {}." .format(min_prebin_size)) if min_n_bins is not None: if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0: raise ValueError("min_n_bins must be a positive integer; got {}." .format(min_n_bins)) if max_n_bins is not None: if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0: raise ValueError("max_n_bins must be a positive integer; got {}." .format(max_n_bins)) if min_n_bins is not None and max_n_bins is not None: if min_n_bins > max_n_bins: raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}." 
.format(min_n_bins, max_n_bins)) if min_bin_size is not None: if (not isinstance(min_bin_size, numbers.Number) or not 0. < min_bin_size <= 0.5): raise ValueError("min_bin_size must be in (0, 0.5]; got {}." .format(min_bin_size)) if max_bin_size is not None: if (not isinstance(max_bin_size, numbers.Number) or not 0. < max_bin_size <= 1.0): raise ValueError("max_bin_size must be in (0, 1.0]; got {}." .format(max_bin_size)) if min_bin_size is not None and max_bin_size is not None: if min_bin_size > max_bin_size: raise ValueError("min_bin_size must be <= max_bin_size; " "got {} <= {}.".format(min_bin_size, max_bin_size)) if max_pvalue is not None: if (not isinstance(max_pvalue, numbers.Number) or not 0. < max_pvalue <= 1.0): raise ValueError("max_pvalue must be in (0, 1.0]; got {}." .format(max_pvalue)) if max_pvalue_policy not in ("all", "consecutive"): raise ValueError('Invalid value for max_pvalue_policy. Allowed string ' 'values are "all" and "consecutive".') if selection_criteria is not None: if not isinstance(selection_criteria, dict): raise TypeError("selection_criteria must be a dict.") if fixed_variables is not None: if not isinstance(fixed_variables, (np.ndarray, list)): raise TypeError("fixed_variables must be a list or numpy.ndarray.") if categorical_variables is not None: if not isinstance(categorical_variables, (np.ndarray, list)): raise TypeError("categorical_variables must be a list or " "numpy.ndarray.") if not all(isinstance(c, str) for c in categorical_variables): raise TypeError("variables in categorical_variables must be " "strings.") if special_codes is not None: if not isinstance(special_codes, (np.ndarray, list, dict)): raise TypeError("special_codes must be a dit, list or " "numpy.ndarray.") if isinstance(special_codes, dict) and not len(special_codes): raise ValueError("special_codes empty. special_codes dict must " "contain at least one special.") if split_digits is not None: if (not isinstance(split_digits, numbers.Integral) or not 0 <= split_digits <= 8): raise ValueError("split_digits must be an integer in [0, 8]; " "got {}.".format(split_digits)) if binning_fit_params is not None: if not isinstance(binning_fit_params, dict): raise TypeError("binning_fit_params must be a dict.") if binning_transform_params is not None: if not isinstance(binning_transform_params, dict): raise TypeError("binning_transform_params must be a dict.") if n_jobs is not None: if not isinstance(n_jobs, numbers.Integral): raise ValueError("n_jobs must be an integer or None; got {}." .format(n_jobs)) if not isinstance(verbose, bool): raise TypeError("verbose must be a boolean; got {}.".format(verbose)) def _check_variable_dtype(x): return "categorical" if x.dtype == object else "numerical" class BaseBinningProcess: @classmethod def load(cls, path): """Load binning process from pickle file. Parameters ---------- path : str Pickle file path. Example ------- >>> from optbinning import BinningProcess >>> binning_process = BinningProcess.load("my_binning_process.pkl") """ if not isinstance(path, str): raise TypeError("path must be a string.") with open(path, "rb") as f: return dill.load(f) def save(self, path): """Save binning process to pickle file. Parameters ---------- path : str Pickle file path. 
""" if not isinstance(path, str): raise TypeError("path must be a string.") with open(path, "wb") as f: dill.dump(self, f) def _support_selection_criteria(self): self._support = np.full(self._n_variables, True, dtype=bool) if self.selection_criteria is None: return default_metrics_info = _METRICS[self._target_dtype] criteria_metrics = self.selection_criteria.keys() binning_metrics = pd.DataFrame.from_dict(self._variable_stats).T for metric in default_metrics_info["metrics"]: if metric in criteria_metrics: metric_info = self.selection_criteria[metric] metric_values = binning_metrics[metric].values if "min" in metric_info: self._support &= metric_values >= metric_info["min"] if "max" in metric_info: self._support &= metric_values <= metric_info["max"] if all(m in metric_info for m in ("strategy", "top")): indices_valid = np.where(self._support)[0] metric_values = metric_values[indices_valid] n_valid = len(metric_values) # Auxiliary support support = np.full(self._n_variables, False, dtype=bool) top = metric_info["top"] if not isinstance(top, numbers.Integral): top = int(np.ceil(n_valid * top)) n_selected = min(n_valid, top) if metric_info["strategy"] == "highest": mask = np.argsort(-metric_values)[:n_selected] elif metric_info["strategy"] == "lowest": mask = np.argsort(metric_values)[:n_selected] support[indices_valid[mask]] = True self._support &= support # Fixed variables if self.fixed_variables is not None: for fv in self.fixed_variables: idfv = list(self.variable_names).index(fv) self._support[idfv] = True def _binning_selection_criteria(self): for i, name in enumerate(self.variable_names): optb = self._binned_variables[name] optb.binning_table.build() n_bins = len(optb.splits) if optb.dtype == "numerical": n_bins += 1 info = {"dtype": optb.dtype, "status": optb.status, "n_bins": n_bins} if self._target_dtype in ("binary", "multiclass"): optb.binning_table.analysis(print_output=False) if self._target_dtype == "binary": metrics = { "iv": optb.binning_table.iv, "gini": optb.binning_table.gini, "js": optb.binning_table.js, "quality_score": optb.binning_table.quality_score} else: metrics = { "js": optb.binning_table.js, "quality_score": optb.binning_table.quality_score} elif self._target_dtype == "continuous": metrics = {} info = {**info, **metrics} self._variable_stats[name] = info self._support_selection_criteria() class BinningProcess(Base, BaseEstimator, BaseBinningProcess): """Binning process to compute optimal binning of variables in a dataset, given a binary, continuous or multiclass target dtype. Parameters ---------- variable_names : array-like List of variable names. max_n_prebins : int (default=20) The maximum number of bins after pre-binning (prebins). min_prebin_size : float (default=0.05) The fraction of mininum number of records for each prebin. min_n_bins : int or None, optional (default=None) The minimum number of bins. If None, then ``min_n_bins`` is a value in ``[0, max_n_prebins]``. max_n_bins : int or None, optional (default=None) The maximum number of bins. If None, then ``max_n_bins`` is a value in ``[0, max_n_prebins]``. min_bin_size : float or None, optional (default=None) The fraction of minimum number of records for each bin. If None, ``min_bin_size = min_prebin_size``. max_bin_size : float or None, optional (default=None) The fraction of maximum number of records for each bin. If None, ``max_bin_size = 1.0``. max_pvalue : float or None, optional (default=0.05) The maximum p-value among bins. 
max_pvalue_policy : str, optional (default="consecutive") The method to determine bins not satisfying the p-value constraint. Supported methods are "consecutive" to compare consecutive bins and "all" to compare all bins. selection_criteria : dict or None (default=None) Variable selection criteria. See notes. .. versionadded:: 0.6.0 fixed_variables : array-like or None List of variables to be fixed. The binning process will retain these variables even if the selection criteria are not satisfied. .. versionadded:: 0.12.1 special_codes : array-like or None, optional (default=None) List of special codes. Use special codes to specify the data values that must be treated separately. split_digits : int or None, optional (default=None) The significant digits of the split points. If ``split_digits`` is set to 0, the split points are integers. If None, then all significant digits in the split points are considered. categorical_variables : array-like or None, optional (default=None) List of numerical variables to be considered categorical. These are nominal variables. Not applicable when the target type is multiclass. binning_fit_params : dict or None, optional (default=None) Dictionary with optimal binning fitting options for specific variables. Example: ``{"variable_1": {"max_n_bins": 4}}``. binning_transform_params : dict or None, optional (default=None) Dictionary with optimal binning transform options for specific variables. Example: ``{"variable_1": {"metric": "event_rate"}}``. n_jobs : int or None, optional (default=None) Number of cores to run in parallel while binning variables. ``None`` means 1 core. ``-1`` means using all processors. .. versionadded:: 0.7.1 verbose : bool (default=False) Enable verbose output. Notes ----- Parameter ``selection_criteria`` allows specifying criteria for variable selection. The input is a dictionary as follows .. code:: selection_criteria = { "metric_1": { "min": 0, "max": 1, "strategy": "highest", "top": 0.25 }, "metric_2": { "min": 0.02 } } where several metrics can be combined. For example, the above dictionary indicates that the top 25% of variables with "metric_1" in [0, 1] and "metric_2" greater than or equal to 0.02 are selected. Supported key values are: * keys ``min`` and ``max`` support numerical values. * key ``strategy`` supports options "highest" and "lowest". * key ``top`` supports an integer or decimal (percentage). .. warning:: If the binning process instance is going to be saved, do not pass the option ``"solver": "mip"`` via the ``binning_fit_params`` parameter.
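Example
-------
A minimal usage sketch for a binary target; the scikit-learn breast
cancer dataset below is used purely for illustration and is not part
of this package:

>>> import pandas as pd
>>> from sklearn.datasets import load_breast_cancer
>>> from optbinning import BinningProcess
>>> data = load_breast_cancer()
>>> df = pd.DataFrame(data.data, columns=data.feature_names)
>>> binning_process = BinningProcess(variable_names=list(data.feature_names))
>>> binning_process.fit(df, data.target)
>>> X_woe = binning_process.transform(df, metric="woe")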
""" def __init__(self, variable_names, max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, max_pvalue=None, max_pvalue_policy="consecutive", selection_criteria=None, fixed_variables=None, categorical_variables=None, special_codes=None, split_digits=None, binning_fit_params=None, binning_transform_params=None, n_jobs=None, verbose=False): self.variable_names = variable_names self.max_n_prebins = max_n_prebins self.min_prebin_size = min_prebin_size self.min_n_bins = min_n_bins self.max_n_bins = max_n_bins self.min_bin_size = min_bin_size self.max_bin_size = max_bin_size self.max_pvalue = max_pvalue self.max_pvalue_policy = max_pvalue_policy self.selection_criteria = selection_criteria self.fixed_variables = fixed_variables self.binning_fit_params = binning_fit_params self.binning_transform_params = binning_transform_params self.special_codes = special_codes self.split_digits = split_digits self.categorical_variables = categorical_variables self.n_jobs = n_jobs self.verbose = verbose # auxiliary self._n_samples = None self._n_variables = None self._target_dtype = None self._n_numerical = None self._n_categorical = None self._n_selected = None self._binned_variables = {} self._variable_dtypes = {} self._variable_stats = {} self._support = None # timing self._time_total = None self._is_updated = False self._is_fitted = False def fit(self, X, y, sample_weight=None, check_input=False): """Fit the binning process. Fit the optimal binning to all variables according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where n_samples is the number of samples. .. versionchanged:: 0.4.0 X supports ``numpy.ndarray`` and ``pandas.DataFrame``. y : array-like of shape (n_samples,) Target vector relative to x. sample_weight : array-like of shape (n_samples,) (default=None) Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Only applied if ``prebinning_method="cart"``. This option is only available for a binary target. check_input : bool (default=False) Whether to check input arrays. Returns ------- self : BinningProcess Fitted binning process. """ return self._fit(X, y, sample_weight, check_input) def fit_disk(self, input_path, target, **kwargs): """Fit the binning process according to the given training data on disk. Parameters ---------- input_path : str Any valid string path to a file with extension .csv or .parquet. target : str Target column. **kwargs : keyword arguments Keyword arguments for ``pandas.read_csv`` or ``pandas.read_parquet``. Returns ------- self : BinningProcess Fitted binning process. """ return self._fit_disk(input_path, target, **kwargs) def fit_from_dict(self, dict_optb): """Fit the binning process from a dict of OptimalBinning objects already fitted. Parameters ---------- dict_optb : dict Dictionary with OptimalBinning objects for binary, continuous or multiclass target. All objects must share the same class. Returns ------- self : BinningProcess Fitted binning process. """ return self._fit_from_dict(dict_optb) def fit_transform(self, X, y, sample_weight=None, metric=None, metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Fit the binning process according to the given training data, then transform it. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where n_samples is the number of samples. 
y : array-like of shape (n_samples,) Target vector relative to x. sample_weight : array-like of shape (n_samples,) (default=None) Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Only applied if ``prebinning_method="cart"``. This option is only available for a binary target. metric : str or None, (default=None) The metric used to transform the input vector. If None, the default transformation metric for each target type is applied. For binary target options are: "woe" (default), "event_rate", "indices" and "bins". For continuous target options are: "mean" (default), "indices" and "bins". For multiclass target options are: "mean_woe" (default), "weighted_mean_woe", "indices" and "bins". metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- X_new : numpy array, shape = (n_samples, n_features_new) Transformed array. """ return self.fit(X, y, sample_weight, check_input).transform( X, metric, metric_special, metric_missing, show_digits, check_input) def fit_transform_disk(self, input_path, output_path, target, chunksize, metric=None, metric_special=0, metric_missing=0, show_digits=2, **kwargs): """Fit the binning process according to the given training data on disk, then transform it and save to comma-separated values (csv) file. Parameters ---------- input_path : str Any valid string path to a file with extension .csv. output_path : str Any valid string path to a file with extension .csv. target : str Target column. chunksize : Rows to read, transform and write at a time. metric : str or None, (default=None) The metric used to transform the input vector. If None, the default transformation metric for each target type is applied. For binary target options are: "woe" (default), "event_rate", "indices" and "bins". For continuous target options are: "mean" (default), "indices" and "bins". For multiclass target options are: "mean_woe" (default), "weighted_mean_woe", "indices" and "bins". metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. **kwargs : keyword arguments Keyword arguments for ``pandas.read_csv``. Returns ------- self : BinningProcess Fitted binning process. 
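Example
-------
A minimal sketch; the file paths, variable names and target column
below are illustrative:

>>> binning_process = BinningProcess(variable_names)
>>> binning_process.fit_transform_disk(
...     "train.csv", "train_woe.csv", target="target", chunksize=10000)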
""" return self.fit_disk(input_path, target, **kwargs).transform_disk( input_path, output_path, chunksize, metric, metric_special, metric_missing, show_digits, **kwargs) def transform(self, X, metric=None, metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Transform given data to metric using bins from each fitted optimal binning. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where n_samples is the number of samples. metric : str or None, (default=None) The metric used to transform the input vector. If None, the default transformation metric for each target type is applied. For binary target options are: "woe" (default), "event_rate", "indices" and "bins". For continuous target options are: "mean" (default), "indices" and "bins". For multiclass target options are: "mean_woe" (default), "weighted_mean_woe", "indices" and "bins". metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- X_new : numpy array or pandas.DataFrame, shape = (n_samples, n_features_new) Transformed array. """ self._check_is_fitted() return self._transform(X, metric, metric_special, metric_missing, show_digits, check_input) def transform_disk(self, input_path, output_path, chunksize, metric=None, metric_special=0, metric_missing=0, show_digits=2, **kwargs): """Transform given data on disk to metric using bins from each fitted optimal binning. Save to comma-separated values (csv) file. Parameters ---------- input_path : str Any valid string path to a file with extension .csv. output_path : str Any valid string path to a file with extension .csv. chunksize : Rows to read, transform and write at a time. metric : str or None, (default=None) The metric used to transform the input vector. If None, the default transformation metric for each target type is applied. For binary target options are: "woe" (default), "event_rate", "indices" and "bins". For continuous target options are: "mean" (default), "indices" and "bins". For multiclass target options are: "mean_woe" (default), "weighted_mean_woe", "indices" and "bins". metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate for a binary target, and any numerical value for other targets. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. **kwargs : keyword arguments Keyword arguments for ``pandas.read_csv``. Returns ------- self : BinningProcess Fitted binning process. 
""" self._check_is_fitted() return self._transform_disk(input_path, output_path, chunksize, metric, metric_special, metric_missing, show_digits, **kwargs) def information(self, print_level=1): """Print overview information about the options settings and statistics. Parameters ---------- print_level : int (default=1) Level of details. """ self._check_is_fitted() if not isinstance(print_level, numbers.Integral) or print_level < 0: raise ValueError("print_level must be an integer >= 0; got {}." .format(print_level)) n_numerical = list(self._variable_dtypes.values()).count("numerical") n_categorical = self._n_variables - n_numerical self._n_selected = np.count_nonzero(self._support) dict_user_options = self.get_params() print_binning_process_information( print_level, self._n_samples, self._n_variables, self._target_dtype, n_numerical, n_categorical, self._n_selected, self._time_total, dict_user_options) def summary(self): """Binning process summary with main statistics for all binned variables. Parameters ---------- df_summary : pandas.DataFrame Binning process summary. """ self._check_is_fitted() if self._is_updated: self._binning_selection_criteria() self._is_updated = False df_summary = pd.DataFrame.from_dict(self._variable_stats).T df_summary.reset_index(inplace=True) df_summary.rename(columns={"index": "name"}, inplace=True) df_summary["selected"] = self._support columns = ["name", "dtype", "status", "selected", "n_bins"] columns += _METRICS[self._target_dtype]["metrics"] return df_summary[columns] def get_binned_variable(self, name): """Return optimal binning object for a given variable name. Parameters ---------- name : string The variable name. """ self._check_is_fitted() if not isinstance(name, str): raise TypeError("name must be a string.") if name in self.variable_names: return self._binned_variables[name] else: raise ValueError("name {} does not match a binned variable." .format(name)) def update_binned_variable(self, name, optb): """Update optimal binning object for a given variable. Parameters ---------- name : string The variable name. optb : object The optimal binning object already fitted. """ self._check_is_fitted() if not isinstance(name, str): raise TypeError("name must be a string.") if name not in self.variable_names: raise ValueError("name {} does not match a binned variable." 
.format(name)) optb_types = _OPTB_TYPES + _OPTBPW_TYPES if not isinstance(optb, optb_types): raise TypeError("Object {} must be of type ({}); got {}" .format(name, optb_types, type(optb))) # Check current class if self._target_dtype == "binary": optb_binary = (OptimalBinning, OptimalPWBinning) if not isinstance(optb, optb_binary): raise TypeError("target is binary and Object {} must be of " "type {}.".format(optb, optb_binary)) elif self._target_dtype == "continuous": optb_continuous = (ContinuousOptimalBinning, ContinuousOptimalPWBinning) if not isinstance(optb, optb_continuous): raise TypeError("target is continuous and Object {} must be " "of type {}.".format(optb, optb_continuous)) elif self._target_dtype == "multiclass": if not isinstance(optb, MulticlassOptimalBinning): raise TypeError("target is multiclass and Object {} must be " "of type {}.".format( optb, MulticlassOptimalBinning)) optb_old = self._binned_variables[name] if optb_old.name and optb_old.name != optb.name: raise ValueError("Update object name must match old object name; " "{} != {}.".format(optb_old.name, optb.name)) if optb.name and name != optb.name: raise ValueError("name and object name must coincide.") self._binned_variables[name] = optb self._is_updated = True def get_support(self, indices=False, names=False): """Get a mask, or integer index, or names of the variables selected. Parameters ---------- indices : boolean (default=False) If True, the return value will be an array of integers, rather than a boolean mask. names : boolean (default=False) If True, the return value will be an array of strings, rather than a boolean mask. Returns ------- support : array An index that selects the retained features from a feature vector. If `indices` is False, this is a boolean array of shape [# input features], in which an element is True iff its corresponding feature is selected for retention. If `indices` is True, this is an integer array of shape [# output features] whose values are indices into the input feature vector. If `names` is True, this is an string array of sahpe [# output features], whose values are names of the selected features. """ self._check_is_fitted() if indices and names: raise ValueError("Only indices or names can be True.") mask = self._support if indices: return np.where(mask)[0] elif names: return np.asarray(self.variable_names)[mask] else: return mask def _fit(self, X, y, sample_weight, check_input): time_init = time.perf_counter() if self.verbose: logger.info("Binning process started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params()) # check X dtype if not isinstance(X, (pd.DataFrame, np.ndarray)): raise TypeError("X must be a pandas.DataFrame or numpy.ndarray.") # check target dtype self._target_dtype = type_of_target(y) if self._target_dtype not in ("binary", "continuous", "multiclass"): raise ValueError("Target type {} is not supported." .format(self._target_dtype)) # check sample weight if sample_weight is not None and self._target_dtype != "binary": raise ValueError("Target type {} does not support sample weight." 
.format(self._target_dtype)) if self.selection_criteria is not None: _check_selection_criteria(self.selection_criteria, self._target_dtype) # check X and y data if check_input: X = check_array(X, ensure_2d=False, dtype=None, force_all_finite='allow-nan') y = check_array(y, ensure_2d=False, dtype=None, force_all_finite=True) check_consistent_length(X, y) self._n_samples, self._n_variables = X.shape if self._n_variables != len(self.variable_names): raise ValueError("The number of columns must be equal to the" "length of variable_names.") if self.verbose: logger.info("Dataset: number of samples: {}." .format(self._n_samples)) logger.info("Dataset: number of variables: {}." .format(self._n_variables)) # Number of jobs n_jobs = effective_n_jobs(self.n_jobs) if self.verbose: logger.info("Options: number of jobs (cores): {}." .format(n_jobs)) if n_jobs == 1: for i, name in enumerate(self.variable_names): if self.verbose: logger.info("Binning variable ({} / {}): {}." .format(i, self._n_variables, name)) if isinstance(X, np.ndarray): dtype, optb = _fit_variable( X[:, i], y, name, self._target_dtype, self.categorical_variables, self.binning_fit_params, self.max_n_prebins, self.min_prebin_size, self.min_n_bins, self.max_n_bins, self.min_bin_size, self.max_pvalue, self.max_pvalue_policy, self.special_codes, self.split_digits, sample_weight) else: dtype, optb = _fit_variable( X[name], y, name, self._target_dtype, self.categorical_variables, self.binning_fit_params, self.max_n_prebins, self.min_prebin_size, self.min_n_bins, self.max_n_bins, self.min_bin_size, self.max_pvalue, self.max_pvalue_policy, self.special_codes, self.split_digits, sample_weight) self._variable_dtypes[name] = dtype self._binned_variables[name] = optb else: ids = np.arange(len(self.variable_names)) id_blocks = np.array_split(ids, n_jobs) names = np.asarray(self.variable_names) if isinstance(X, np.ndarray): blocks = Parallel(n_jobs=n_jobs, prefer="threads")( delayed(_fit_block)( X[:, id_block], y, names[id_block], self._target_dtype, self.categorical_variables, self.binning_fit_params, self.max_n_prebins, self.min_prebin_size, self.min_n_bins, self.max_n_bins, self.min_bin_size, self.max_pvalue, self.max_pvalue_policy, self.special_codes, self.split_digits) for id_block in id_blocks) else: blocks = Parallel(n_jobs=n_jobs, prefer="threads")( delayed(_fit_block)( X[names[id_block]], y, names[id_block], self._target_dtype, self.categorical_variables, self.binning_fit_params, self.max_n_prebins, self.min_prebin_size, self.min_n_bins, self.max_n_bins, self.min_bin_size, self.max_pvalue, self.max_pvalue_policy, self.special_codes, self.split_digits) for id_block in id_blocks) for b in blocks: vt, bv = b self._variable_dtypes.update(vt) self._binned_variables.update(bv) if self.verbose: logger.info("Binning process variable selection...") # Compute binning statistics and decide whether a variable is selected self._binning_selection_criteria() self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Binning process terminated. 
Time: {:.4f}s" .format(self._time_total)) # Completed successfully self._is_fitted = True return self def _fit_disk(self, input_path, target, **kwargs): time_init = time.perf_counter() if self.verbose: logger.info("Binning process started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params()) # Input file extension extension = input_path.split(".")[1] # Check extension if extension not in ("csv", "parquet"): raise ValueError("input_path extension must be csv or parquet; " "got {}.".format(extension)) # Check target if not isinstance(target, str): raise TypeError("target must be a string.") # Retrieve target and check dtype y = _read_column(input_path, extension, target, **kwargs) self._target_dtype = type_of_target(y) if self._target_dtype not in ("binary", "continuous", "multiclass"): raise ValueError("Target type {} is not supported." .format(self._target_dtype)) if self.selection_criteria is not None: _check_selection_criteria(self.selection_criteria, self._target_dtype) if self.fixed_variables is not None: for fv in self.fixed_variables: if fv not in self.variable_names: raise ValueError("Variable {} to be fixed is not a valid " "variable name.".format(fv)) self._n_samples = len(y) self._n_variables = len(self.variable_names) if self.verbose: logger.info("Dataset: number of samples: {}." .format(self._n_samples)) logger.info("Dataset: number of variables: {}." .format(self._n_variables)) for name in self.variable_names: x = _read_column(input_path, extension, name, **kwargs) dtype, optb = _fit_variable( x, y, name, self._target_dtype, self.categorical_variables, self.binning_fit_params, self.max_n_prebins, self.min_prebin_size, self.min_n_bins, self.max_n_bins, self.min_bin_size, self.max_pvalue, self.max_pvalue_policy, self.special_codes, self.split_digits) self._variable_dtypes[name] = dtype self._binned_variables[name] = optb if self.verbose: logger.info("Binning process variable selection...") # Compute binning statistics and decide whether a variable is selected self._binning_selection_criteria() self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Binning process terminated. Time: {:.4f}s" .format(self._time_total)) # Completed successfully self._is_fitted = True return self def _fit_from_dict(self, dict_optb): time_init = time.perf_counter() if self.verbose: logger.info("Binning process started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params()) if not isinstance(dict_optb, dict): raise TypeError("dict_optb must be a dict.") # Check variable names if set(dict_optb.keys()) != set(self.variable_names): raise ValueError("dict_optb keys and variable names must " "coincide.") # Check objects class optb_types = _OPTB_TYPES types = set() for name, optb in dict_optb.items(): if not isinstance(name, str): raise TypeError("Object key must be a string.") if not isinstance(optb, optb_types): raise TypeError("Object {} must be of type ({}); got {}" .format(name, optb_types, type(optb))) types.add(type(optb).__name__) if len(types) > 1: raise TypeError("All binning objects must be of the same " "class.") # Check if fitted if not optb._is_fitted: raise NotFittedError("Object with key={} is not fitted yet. " "Call 'fit' for this object before " "passing to a binning process." .format(name)) # Check if name was provided and matches dict_optb key. if optb.name and optb.name != name: raise ValueError("Object with key={} has attribute name={}. " "If object has a name those must coincide." 
.format(name, optb.name)) obj_class = types.pop() if obj_class == "OptimalBinning": self._target_dtype = "binary" elif obj_class == "ContinuousOptimalBinning": self._target_dtype = "continuous" elif obj_class == "MulticlassOptimalBinning": self._target_dtype = "multiclass" if self.selection_criteria is not None: _check_selection_criteria(self.selection_criteria, self._target_dtype) self._n_samples = 0 self._n_variables = len(self.variable_names) for name, optb in dict_optb.items(): self._variable_dtypes[name] = optb.dtype self._binned_variables[name] = optb # Compute binning statistics and decide whether a variable is selected self._binning_selection_criteria() self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Binning process terminated. Time: {:.4f}s" .format(self._time_total)) # Completed successfully self._is_fitted = True return self def _transform(self, X, metric, metric_special, metric_missing, show_digits, check_input): # Check X dtype if not isinstance(X, (pd.DataFrame, np.ndarray)): raise TypeError("X must be a pandas.DataFrame or numpy.ndarray.") n_samples, n_variables = X.shape mask = self.get_support() if not mask.any(): warn("No variables were selected: either the data is" " too noisy or the selection_criteria too strict.", UserWarning) return np.empty(0).reshape((n_samples, 0)) if isinstance(X, np.ndarray) and len(mask) != n_variables: raise ValueError("X has a different shape that during fitting.") if isinstance(X, pd.DataFrame): selected_variables = self.get_support(names=True) for name in selected_variables: if name not in X.columns: raise ValueError("Selected variable {} must be a column " "in the input dataframe.".format(name)) # Check metric if metric in ("indices", "bins"): if any(isinstance(optb, _OPTBPW_TYPES) for optb in self._binned_variables.values()): raise TypeError("metric {} not supported for piecewise " "optimal binning objects.".format(metric)) indices_selected_variables = self.get_support(indices=True) n_selected_variables = len(indices_selected_variables) if metric == "indices": X_transform = np.full( (n_samples, n_selected_variables), -1, dtype=int) elif metric == "bins": X_transform = np.full( (n_samples, n_selected_variables), "", dtype=object) else: X_transform = np.zeros((n_samples, n_selected_variables)) for i, idx in enumerate(indices_selected_variables): name = self.variable_names[idx] optb = self._binned_variables[name] if isinstance(X, np.ndarray): x = X[:, idx] else: x = X[name] params = {} if self.binning_transform_params is not None: params = self.binning_transform_params.get(name, {}) metric = params.get("metric", metric) metric_missing = params.get("metric_missing", metric_missing) metric_special = params.get("metric_special", metric_special) tparams = { "x": x, "metric": metric, "metric_special": metric_special, "metric_missing": metric_missing, "check_input": check_input, "show_digits": show_digits } if isinstance(optb, _OPTBPW_TYPES): tparams.pop("show_digits") if metric is None: tparams.pop("metric") X_transform[:, i] = optb.transform(**tparams) if isinstance(X, pd.DataFrame): return pd.DataFrame(X_transform, columns=selected_variables) return X_transform def _transform_disk(self, input_path, output_path, chunksize, metric, metric_special, metric_missing, show_digits, **kwargs): # check input_path and output_path extensions input_extension = input_path.split(".")[1] output_extension = output_path.split(".")[1] if input_extension != "csv" or output_extension != "csv": raise ValueError("input_path and 
output_path must be csv files.") # check chunksize if not isinstance(chunksize, numbers.Integral) or chunksize <= 0: raise ValueError("chunksize must be a positive integer; got {}." .format(chunksize)) # Check metric if metric in ("indices", "bins"): if any(isinstance(optb, _OPTBPW_TYPES) for optb in self._binned_variables.values()): raise TypeError("metric {} not supported for piecewise " "optimal binning objects.".format(metric)) selected_variables = self.get_support(names=True) n_selected_variables = len(selected_variables) chunks = pd.read_csv(input_path, engine='c', chunksize=chunksize, usecols=selected_variables, **kwargs) for k, chunk in enumerate(chunks): n_samples, n_variables = chunk.shape if metric == "indices": X_transform = np.full( (n_samples, n_selected_variables), -1, dtype=int) elif metric == "bins": X_transform = np.full( (n_samples, n_selected_variables), "", dtype=object) else: X_transform = np.zeros((n_samples, n_selected_variables)) for i, name in enumerate(selected_variables): optb = self._binned_variables[name] params = {} if self.binning_transform_params is not None: params = self.binning_transform_params.get(name, {}) metric = params.get("metric", metric) metric_missing = params.get("metric_missing", metric_missing) metric_special = params.get("metric_special", metric_special) tparams = { "x": chunk[name], "metric": metric, "metric_special": metric_special, "metric_missing": metric_missing, "show_digits": show_digits } if isinstance(optb, _OPTBPW_TYPES): tparams.pop("show_digits") if metric is None: tparams.pop("metric") X_transform[:, i] = optb.transform(**tparams) df = pd.DataFrame(X_transform, columns=selected_variables) df.to_csv(output_path, mode='a', index=False, header=(k == 0)) return self ``` #### File: optbinning/binning/continuous_binning.py ```python import numbers import time from sklearn.utils import check_array import numpy as np from ..information import solver_statistics from ..logging import Logger from .auto_monotonic import auto_monotonic_continuous from .auto_monotonic import peak_valley_trend_change_heuristic from .binning import OptimalBinning from .binning_statistics import continuous_bin_info from .binning_statistics import ContinuousBinningTable from .binning_statistics import target_info_special_continuous from .continuous_cp import ContinuousBinningCP from .preprocessing import preprocessing_user_splits_categorical from .preprocessing import split_data from .transformations import transform_continuous_target logger = Logger(__name__).logger def _check_parameters(name, dtype, prebinning_method, max_n_prebins, min_prebin_size, min_n_bins, max_n_bins, min_bin_size, max_bin_size, monotonic_trend, min_mean_diff, max_pvalue, max_pvalue_policy, outlier_detector, outlier_params, cat_cutoff, user_splits, user_splits_fixed, special_codes, split_digits, time_limit, verbose): if not isinstance(name, str): raise TypeError("name must be a string.") if dtype not in ("categorical", "numerical"): raise ValueError('Invalid value for dtype. Allowed string ' 'values are "categorical" and "numerical".') if prebinning_method not in ("cart", "quantile", "uniform"): raise ValueError('Invalid value for prebinning_method. Allowed string ' 'values are "cart", "quantile" and "uniform".') if not isinstance(max_n_prebins, numbers.Integral) or max_n_prebins <= 1: raise ValueError("max_prebins must be an integer greater than 1; " "got {}.".format(max_n_prebins)) if not 0. < min_prebin_size <= 0.5: raise ValueError("min_prebin_size must be in (0, 0.5]; got {}." 
.format(min_prebin_size)) if min_n_bins is not None: if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0: raise ValueError("min_n_bins must be a positive integer; got {}." .format(min_n_bins)) if max_n_bins is not None: if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0: raise ValueError("max_n_bins must be a positive integer; got {}." .format(max_n_bins)) if min_n_bins is not None and max_n_bins is not None: if min_n_bins > max_n_bins: raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}." .format(min_n_bins, max_n_bins)) if min_bin_size is not None: if (not isinstance(min_bin_size, numbers.Number) or not 0. < min_bin_size <= 0.5): raise ValueError("min_bin_size must be in (0, 0.5]; got {}." .format(min_bin_size)) if max_bin_size is not None: if (not isinstance(max_bin_size, numbers.Number) or not 0. < max_bin_size <= 1.0): raise ValueError("max_bin_size must be in (0, 1.0]; got {}." .format(max_bin_size)) if min_bin_size is not None and max_bin_size is not None: if min_bin_size > max_bin_size: raise ValueError("min_bin_size must be <= max_bin_size; " "got {} <= {}.".format(min_bin_size, max_bin_size)) if monotonic_trend is not None: if monotonic_trend not in ("auto", "auto_heuristic", "auto_asc_desc", "ascending", "descending", "convex", "concave", "peak", "valley", "peak_heuristic", "valley_heuristic"): raise ValueError('Invalid value for monotonic trend. Allowed ' 'string values are "auto", "auto_heuristic", ' '"auto_asc_desc", "ascending", "descending", ' '"concave", "convex", "peak", "valley", ' '"peak_heuristic" and "valley_heuristic".') if (not isinstance(min_mean_diff, numbers.Number) or min_mean_diff < 0): raise ValueError("min_mean_diff must be >= 0; got {}." .format(min_mean_diff)) if max_pvalue is not None: if (not isinstance(max_pvalue, numbers.Number) or not 0. < max_pvalue <= 1.0): raise ValueError("max_pvalue must be in (0, 1.0]; got {}." .format(max_pvalue)) if max_pvalue_policy not in ("all", "consecutive"): raise ValueError('Invalid value for max_pvalue_policy. Allowed string ' 'values are "all" and "consecutive".') if outlier_detector is not None: if outlier_detector not in ("range", "zscore"): raise ValueError('Invalid value for outlier_detector. Allowed ' 'string values are "range" and "zscore".') if outlier_params is not None: if not isinstance(outlier_params, dict): raise TypeError("outlier_params must be a dict or None; " "got {}.".format(outlier_params)) if cat_cutoff is not None: if (not isinstance(cat_cutoff, numbers.Number) or not 0. < cat_cutoff <= 1.0): raise ValueError("cat_cutoff must be in (0, 1.0]; got {}." .format(cat_cutoff)) if user_splits is not None: if not isinstance(user_splits, (np.ndarray, list)): raise TypeError("user_splits must be a list or numpy.ndarray.") if user_splits_fixed is not None: if user_splits is None: raise ValueError("user_splits must be provided.") else: if not isinstance(user_splits_fixed, (np.ndarray, list)): raise TypeError("user_splits_fixed must be a list or " "numpy.ndarray.") elif not all(isinstance(s, bool) for s in user_splits_fixed): raise ValueError("user_splits_fixed must be list of boolean.") elif len(user_splits) != len(user_splits_fixed): raise ValueError("Inconsistent length of user_splits and " "user_splits_fixed: {} != {}. 
Lengths must " "be equal".format(len(user_splits), len(user_splits_fixed))) if special_codes is not None: if not isinstance(special_codes, (np.ndarray, list, dict)): raise TypeError("special_codes must be a dit, list or " "numpy.ndarray.") if isinstance(special_codes, dict) and not len(special_codes): raise ValueError("special_codes empty. special_codes dict must " "contain at least one special.") if split_digits is not None: if (not isinstance(split_digits, numbers.Integral) or not 0 <= split_digits <= 8): raise ValueError("split_digist must be an integer in [0, 8]; " "got {}.".format(split_digits)) if not isinstance(time_limit, numbers.Number) or time_limit < 0: raise ValueError("time_limit must be a positive value in seconds; " "got {}.".format(time_limit)) if not isinstance(verbose, bool): raise TypeError("verbose must be a boolean; got {}.".format(verbose)) class ContinuousOptimalBinning(OptimalBinning): """Optimal binning of a numerical or categorical variable with respect to a continuous target. Parameters ---------- name : str, optional (default="") The variable name. dtype : str, optional (default="numerical") The variable data type. Supported data types are "numerical" for continuous and ordinal variables and "categorical" for categorical and nominal variables. prebinning_method : str, optional (default="cart") The pre-binning method. Supported methods are "cart" for a CART decision tree, "quantile" to generate prebins with approximately same frequency and "uniform" to generate prebins with equal width. Method "cart" uses `sklearn.tree.DecisionTreeRegressor <https://scikit-learn.org/stable/modules/generated/sklearn.tree. DecisionTreeRegressor.html>`_. max_n_prebins : int (default=20) The maximum number of bins after pre-binning (prebins). min_prebin_size : float (default=0.05) The fraction of mininum number of records for each prebin. min_n_bins : int or None, optional (default=None) The minimum number of bins. If None, then ``min_n_bins`` is a value in ``[0, max_n_prebins]``. max_n_bins : int or None, optional (default=None) The maximum number of bins. If None, then ``max_n_bins`` is a value in ``[0, max_n_prebins]``. min_bin_size : float or None, optional (default=None) The fraction of minimum number of records for each bin. If None, ``min_bin_size = min_prebin_size``. max_bin_size : float or None, optional (default=None) The fraction of maximum number of records for each bin. If None, ``max_bin_size = 1.0``. monotonic_trend : str or None, optional (default="auto") The **mean** monotonic trend. Supported trends are “auto”, "auto_heuristic" and "auto_asc_desc" to automatically determine the trend minimize the L1-norm using a machine learning classifier, "ascending", "descending", "concave", "convex", "peak" and "peak_heuristic" to allow a peak change point, and "valley" and "valley_heuristic" to allow a valley change point. Trends "auto_heuristic", "peak_heuristic" and "valley_heuristic" use a heuristic to determine the change point, and are significantly faster for large size instances (``max_n_prebins> 20``). Trend "auto_asc_desc" is used to automatically select the best monotonic trend between "ascending" and "descending". If None, then the monotonic constraint is disabled. min_mean_diff : float, optional (default=0) The minimum mean difference between consecutives bins. This option currently only applies when ``monotonic_trend`` is "ascending" or "descending". max_pvalue : float or None, optional (default=0.05) The maximum p-value among bins. 
The T-test is used to detect bins not satisfying the p-value constraint. max_pvalue_policy : str, optional (default="consecutive") The method to determine bins not satisfying the p-value constraint. Supported methods are "consecutive" to compare consecutive bins and "all" to compare all bins. outlier_detector : str or None, optional (default=None) The outlier detection method. Supported methods are "range" to use the interquartile range based method or "zscore" to use the modified Z-score method. outlier_params : dict or None, optional (default=None) Dictionary of parameters to pass to the outlier detection method. cat_cutoff : float or None, optional (default=None) Generate bin others with categories in which the fraction of occurrences is below the ``cat_cutoff`` value. This option is available when ``dtype`` is "categorical". user_splits : array-like or None, optional (default=None) The list of pre-binning split points when ``dtype`` is "numerical" or the list of prebins when ``dtype`` is "categorical". user_splits_fixed : array-like or None (default=None) The list of pre-binning split points that must be fixed. special_codes : array-like, dict or None, optional (default=None) List of special codes. Use special codes to specify the data values that must be treated separately. split_digits : int or None, optional (default=None) The significant digits of the split points. If ``split_digits`` is set to 0, the split points are integers. If None, then all significant digits in the split points are considered. time_limit : int (default=100) The maximum time in seconds to run the optimization solver. verbose : bool (default=False) Enable verbose output. **prebinning_kwargs : keyword arguments The pre-binning keyword arguments. .. versionadded:: 0.6.1 Notes ----- The parameter values ``max_n_prebins`` and ``min_prebin_size`` control complexity and memory usage. The default values generally produce quality results; however, some improvement can be achieved by increasing ``max_n_prebins`` and/or decreasing ``min_prebin_size``. The T-test uses an estimate of the standard deviation of the contingency table to speed up the model generation and reduce memory usage. Therefore, it is not guaranteed to obtain bins satisfying the p-value constraint, although it may work reasonably well in most cases. To avoid bins with similar means, using the parameter ``min_mean_diff`` is recommended.
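Example
-------
A minimal usage sketch with a continuous target; the scikit-learn
diabetes dataset below is used purely for illustration:

>>> import pandas as pd
>>> from sklearn.datasets import load_diabetes
>>> from optbinning import ContinuousOptimalBinning
>>> data = load_diabetes()
>>> df = pd.DataFrame(data.data, columns=data.feature_names)
>>> optb = ContinuousOptimalBinning(name="bmi", dtype="numerical")
>>> optb.fit(df["bmi"], data.target)
>>> x_mean = optb.transform(df["bmi"], metric="mean")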
""" def __init__(self, name="", dtype="numerical", prebinning_method="cart", max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend="auto", min_mean_diff=0, max_pvalue=None, max_pvalue_policy="consecutive", outlier_detector=None, outlier_params=None, cat_cutoff=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, time_limit=100, verbose=False, **prebinning_kwargs): self.name = name self.dtype = dtype self.prebinning_method = prebinning_method self.solver = "cp" self.max_n_prebins = max_n_prebins self.min_prebin_size = min_prebin_size self.min_n_bins = min_n_bins self.max_n_bins = max_n_bins self.min_bin_size = min_bin_size self.max_bin_size = max_bin_size self.monotonic_trend = monotonic_trend self.min_mean_diff = min_mean_diff self.max_pvalue = max_pvalue self.max_pvalue_policy = max_pvalue_policy self.outlier_detector = outlier_detector self.outlier_params = outlier_params self.cat_cutoff = cat_cutoff self.user_splits = user_splits self.user_splits_fixed = user_splits_fixed self.special_codes = special_codes self.split_digits = split_digits self.time_limit = time_limit self.verbose = verbose self.prebinning_kwargs = prebinning_kwargs # auxiliary self._categories = None self._cat_others = None self._n_records = None self._sums = None self._stds = None self._min_target = None self._max_target = None self._n_zeros = None self._n_records_cat_others = None self._n_records_missing = None self._n_records_special = None self._sum_cat_others = None self._sum_special = None self._sum_missing = None self._std_cat_others = None self._std_special = None self._std_missing = None self._min_target_missing = None self._min_target_special = None self._min_target_others = None self._max_target_missing = None self._max_target_special = None self._max_target_others = None self._n_zeros_missing = None self._n_zeros_special = None self._n_zeros_others = None self._problem_type = "regression" # info self._binning_table = None self._n_prebins = None self._n_refinements = 0 self._n_samples = None self._optimizer = None self._splits_optimal = None self._status = None # timing self._time_total = None self._time_preprocessing = None self._time_prebinning = None self._time_solver = None self._time_optimizer = None self._time_postprocessing = None self._is_fitted = False def fit(self, x, y, check_input=False): """Fit the optimal binning according to the given training data. Parameters ---------- x : array-like, shape = (n_samples,) Training vector, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Target vector relative to x. check_input : bool (default=False) Whether to check input arrays. Returns ------- self : ContinuousOptimalBinning Fitted optimal binning. """ return self._fit(x, y, check_input) def fit_transform(self, x, y, metric="mean", metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Fit the optimal binning according to the given training data, then transform it. Parameters ---------- x : array-like, shape = (n_samples,) Training vector, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Target vector relative to x. metric : str (default="mean"): The metric used to transform the input vector. Supported metrics are "mean" to choose the mean, "indices" to assign the corresponding indices of the bins and "bins" to assign the corresponding bin interval. 
metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- x_new : numpy array, shape = (n_samples,) Transformed array. """ return self.fit(x, y, check_input).transform( x, metric, metric_special, metric_missing, show_digits, check_input) def transform(self, x, metric="mean", metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Transform given data to mean using bins from the fitted optimal binning. Parameters ---------- x : array-like, shape = (n_samples,) Training vector, where n_samples is the number of samples. metric : str (default="mean") The metric used to transform the input vector. Supported metrics are "mean" to choose the mean, "indices" to assign the corresponding indices of the bins and "bins" to assign the corresponding bin interval. metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical mean, and any numerical value. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- x_new : numpy array, shape = (n_samples,) Transformed array. Notes ----- Transformation of data including categories not present during training returns zero mean.
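Example
-------
A short sketch of the supported transformation metrics, assuming an
already fitted instance ``optb`` and an input array ``x``:

>>> x_mean = optb.transform(x, metric="mean")
>>> x_indices = optb.transform(x, metric="indices")
>>> x_bins = optb.transform(x, metric="bins", show_digits=3)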
""" self._check_is_fitted() return transform_continuous_target(self._splits_optimal, self.dtype, x, self._n_records, self._sums, self.special_codes, self._categories, self._cat_others, metric, metric_special, metric_missing, self.user_splits, show_digits, check_input) def _fit(self, x, y, check_input): time_init = time.perf_counter() if self.verbose: logger.info("Optimal binning started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params()) # Pre-processing if self.verbose: logger.info("Pre-processing started.") self._n_samples = len(x) if self.verbose: logger.info("Pre-processing: number of samples: {}" .format(self._n_samples)) time_preprocessing = time.perf_counter() [x_clean, y_clean, x_missing, y_missing, x_special, y_special, y_others, categories, cat_others, _, _, _, _] = split_data( self.dtype, x, y, self.special_codes, self.cat_cutoff, self.user_splits, check_input, self.outlier_detector, self.outlier_params) self._time_preprocessing = time.perf_counter() - time_preprocessing if self.verbose: n_clean = len(x_clean) n_missing = len(x_missing) n_special = len(x_special) logger.info("Pre-processing: number of clean samples: {}" .format(n_clean)) logger.info("Pre-processing: number of missing samples: {}" .format(n_missing)) logger.info("Pre-processing: number of special samples: {}" .format(n_special)) if self.outlier_detector is not None: n_outlier = self._n_samples-(n_clean + n_missing + n_special) logger.info("Pre-processing: number of outlier samples: {}" .format(n_outlier)) if self.dtype == "categorical": n_categories = len(categories) n_categories_others = len(cat_others) n_others = len(y_others) logger.info("Pre-processing: number of others samples: {}" .format(n_others)) logger.info("Pre-processing: number of categories: {}" .format(n_categories)) logger.info("Pre-processing: number of categories others: {}" .format(n_categories_others)) logger.info("Pre-processing terminated. 
Time: {:.4f}s" .format(self._time_preprocessing)) # Pre-binning if self.verbose: logger.info("Pre-binning started.") time_prebinning = time.perf_counter() if self.user_splits is not None: n_splits = len(self.user_splits) if self.verbose: logger.info("Pre-binning: user splits supplied: {}" .format(n_splits)) if not n_splits: splits = self.user_splits n_records = np.array([]) sums = np.array([]) stds = np.array([]) else: if self.dtype == "numerical": user_splits = check_array( self.user_splits, ensure_2d=False, dtype=None, force_all_finite=True) if len(set(user_splits)) != len(user_splits): raise ValueError("User splits are not unique.") sorted_idx = np.argsort(user_splits) user_splits = user_splits[sorted_idx] else: [categories, user_splits, x_clean, y_clean, y_others, cat_others, _, _, sorted_idx ] = preprocessing_user_splits_categorical( self.user_splits, x_clean, y_clean, None) if self.user_splits_fixed is not None: self.user_splits_fixed = np.asarray( self.user_splits_fixed)[sorted_idx] [splits, n_records, sums, ssums, stds, min_t, max_t, n_zeros] = self._prebinning_refinement( user_splits, x_clean, y_clean, y_missing, x_special, y_special, y_others) else: [splits, n_records, sums, ssums, stds, min_t, max_t, n_zeros] = self._fit_prebinning( x_clean, y_clean, y_missing, x_special, y_special, y_others) self._n_prebins = len(n_records) self._categories = categories self._cat_others = cat_others self._time_prebinning = time.perf_counter() - time_prebinning if self.verbose: logger.info("Pre-binning: number of prebins: {}" .format(self._n_prebins)) logger.info("Pre-binning terminated. Time: {:.4f}s" .format(self._time_prebinning)) # Optimization self._fit_optimizer(splits, n_records, sums, ssums, stds) # Post-processing if self.verbose: logger.info("Post-processing started.") logger.info("Post-processing: compute binning information.") time_postprocessing = time.perf_counter() if not len(splits): n_records = n_records.sum() sums = sums.sum() [self._n_records, self._sums, self._stds, self._min_target, self._max_target, self._n_zeros] = continuous_bin_info( self._solution, n_records, sums, ssums, stds, min_t, max_t, n_zeros, self._n_records_missing, self._sum_missing, self._std_missing, self._min_target_missing, self._max_target_missing, self._n_zeros_missing, self._n_records_special, self._sum_special, self._std_special, self._min_target_special, self._max_target_special, self._n_zeros_special, self._n_records_cat_others, self._sum_cat_others, self._std_cat_others, self._min_target_others, self._max_target_others, self._n_zeros_others, self._cat_others) if self.dtype == "numerical": min_x = x_clean.min() max_x = x_clean.max() else: min_x = None max_x = None self._binning_table = ContinuousBinningTable( self.name, self.dtype, self.special_codes, self._splits_optimal, self._n_records, self._sums, self._stds, self._min_target, self._max_target, self._n_zeros, min_x, max_x, self._categories, self._cat_others, self.user_splits) self._time_postprocessing = time.perf_counter() - time_postprocessing if self.verbose: logger.info("Post-processing terminated. Time: {:.4f}s" .format(self._time_postprocessing)) self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Optimal binning terminated. Status: {}. 
Time: {:.4f}s" .format(self._status, self._time_total)) # Completed successfully self._is_fitted = True return self def _fit_optimizer(self, splits, n_records, sums, ssums, stds): if self.verbose: logger.info("Optimizer started.") time_init = time.perf_counter() if len(n_records) <= 1: self._status = "OPTIMAL" self._splits_optimal = splits self._solution = np.zeros(len(splits)).astype(bool) if self.verbose: logger.warning("Optimizer: {} bins after pre-binning." .format(len(n_records))) logger.warning("Optimizer: solver not run.") logger.info("Optimizer terminated. Time: 0s") return if self.min_bin_size is not None: min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples)) else: min_bin_size = self.min_bin_size if self.max_bin_size is not None: max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples)) else: max_bin_size = self.max_bin_size # Monotonic trend trend_change = None if self.dtype == "numerical": auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc") if self.monotonic_trend in auto_monotonic_modes: monotonic = auto_monotonic_continuous( n_records, sums, self.monotonic_trend) if self.monotonic_trend == "auto_heuristic": if monotonic in ("peak", "valley"): if monotonic == "peak": monotonic = "peak_heuristic" else: monotonic = "valley_heuristic" mean = sums / n_records trend_change = peak_valley_trend_change_heuristic( mean, monotonic) if self.verbose: logger.info("Optimizer: classifier predicts {} " "monotonic trend.".format(monotonic)) else: monotonic = self.monotonic_trend if monotonic in ("peak_heuristic", "valley_heuristic"): mean = sums / n_records trend_change = peak_valley_trend_change_heuristic( mean, monotonic) else: monotonic = self.monotonic_trend if monotonic is not None: monotonic = "ascending" if self.verbose: if monotonic is None: logger.info( "Optimizer: monotonic trend not set.") else: logger.info("Optimizer: monotonic trend set to {}." .format(monotonic)) optimizer = ContinuousBinningCP(monotonic, self.min_n_bins, self.max_n_bins, min_bin_size, max_bin_size, self.min_mean_diff, self.max_pvalue, self.max_pvalue_policy, self.user_splits_fixed, self.time_limit) if self.verbose: logger.info("Optimizer: build model...") optimizer.build_model(n_records, sums, ssums, trend_change) if self.verbose: logger.info("Optimizer: solve...") status, solution = optimizer.solve() self._solution = solution self._optimizer, self._time_optimizer = solver_statistics( self.solver, optimizer.solver_) self._status = status if self.dtype == "categorical" and self.user_splits is not None: self._splits_optimal = splits[solution] else: self._splits_optimal = splits[solution[:-1]] self._time_solver = time.perf_counter() - time_init if self.verbose: logger.info("Optimizer terminated. 
Time: {:.4f}s" .format(self._time_solver)) def _prebinning_refinement(self, splits_prebinning, x, y, y_missing, x_special, y_special, y_others, sw_clean=None, sw_missing=None, sw_special=None, sw_others=None): n_splits = len(splits_prebinning) if not n_splits: return splits_prebinning, np.array([]), np.array([]) if self.split_digits is not None: splits_prebinning = np.round(splits_prebinning, self.split_digits) if self.dtype == "categorical" and self.user_splits is not None: indices = np.digitize(x, splits_prebinning, right=True) n_bins = n_splits else: indices = np.digitize(x, splits_prebinning, right=False) n_bins = n_splits + 1 # Compute n_records, sum and std for special, missing and others # self._n_records_special = len(y_special) # self._sum_special = np.sum(y_special) # self._n_zeros_special = np.count_nonzero(y_special == 0) # if len(y_special): # self._std_special = np.std(y_special) # self._min_target_special = np.min(y_special) # self._max_target_special = np.max(y_special) [self._n_records_special, self._sum_special, self._n_zeros_special, self._std_special, self._min_target_special, self._max_target_special] = target_info_special_continuous( self.special_codes, x_special, y_special) self._n_records_missing = len(y_missing) self._sum_missing = np.sum(y_missing) self._n_zeros_missing = np.count_nonzero(y_missing == 0) if len(y_missing): self._std_missing = np.std(y_missing) self._min_target_missing = np.min(y_missing) self._max_target_missing = np.max(y_missing) if len(y_others): self._n_records_cat_others = len(y_others) self._sum_cat_others = np.sum(y_others) self._std_cat_others = np.std(y_others) self._min_target_others = np.min(y_others) self._max_target_others = np.max(y_others) self._n_zeros_others = np.count_nonzero(y_others == 0) n_records = np.empty(n_bins).astype(np.int64) sums = np.empty(n_bins) ssums = np.empty(n_bins) stds = np.empty(n_bins) n_zeros = np.empty(n_bins).astype(np.int64) min_t = np.full(n_bins, -np.inf) max_t = np.full(n_bins, np.inf) # Compute prebin information for i in range(n_bins): mask = (indices == i) n_records[i] = np.count_nonzero(mask) ymask = y[mask] sums[i] = np.sum(ymask) ssums[i] = np.sum(ymask ** 2) stds[i] = np.std(ymask) n_zeros[i] = np.count_nonzero(ymask == 0) if len(ymask): min_t[i] = np.min(ymask) max_t[i] = np.max(ymask) return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t, n_zeros) @property def binning_table(self): """Return an instantiated binning table. Please refer to :ref:`Binning table: continuous target`. Returns ------- binning_table : ContinuousBinningTable. """ self._check_is_fitted() return self._binning_table ``` #### File: binning/distributed/base.py ```python from ...exceptions import NotSolvedError class BaseSketch: def _check_is_solved(self): if not self._is_solved: raise NotSolvedError("This {} instance is not solved yet. Call " "'solve' with appropriate arguments." 
.format(self.__class__.__name__)) ``` #### File: optbinning/binning/mip.py ```python import numpy as np from ortools.linear_solver import pywraplp from .model_data import model_data class BinningMIP: def __init__(self, monotonic_trend, min_n_bins, max_n_bins, min_bin_size, max_bin_size, min_bin_n_event, max_bin_n_event, min_bin_n_nonevent, max_bin_n_nonevent, min_event_rate_diff, max_pvalue, max_pvalue_policy, gamma, user_splits_fixed, mip_solver, time_limit): self.monotonic_trend = monotonic_trend self.min_n_bins = min_n_bins self.max_n_bins = max_n_bins self.min_bin_size = min_bin_size self.max_bin_size = max_bin_size self.min_bin_n_event = min_bin_n_event self.max_bin_n_event = max_bin_n_event self.min_bin_n_nonevent = min_bin_n_nonevent self.max_bin_n_nonevent = max_bin_n_nonevent self.min_event_rate_diff = min_event_rate_diff self.max_pvalue = max_pvalue self.max_pvalue_policy = max_pvalue_policy self.gamma = gamma self.user_splits_fixed = user_splits_fixed self.mip_solver = mip_solver self.time_limit = time_limit self.solver_ = None self._n = None self._x = None def build_model(self, divergence, n_nonevent, n_event, trend_change): # Parameters D, V, pvalue_violation_indices = model_data(divergence, n_nonevent, n_event, self.max_pvalue, self.max_pvalue_policy) n = len(n_nonevent) n_records = n_nonevent + n_event # Initialize solver if self.mip_solver == "bop": solver = pywraplp.Solver( 'BinningMIP', pywraplp.Solver.BOP_INTEGER_PROGRAMMING) elif self.mip_solver == "cbc": solver = pywraplp.Solver( 'BinningMIP', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING) # Decision variables x, y, t, d, u, bin_size_diff = self.decision_variables(solver, n) # Objective function if self.gamma: total_records = int(n_records.sum()) regularization = self.gamma / total_records pmax = solver.IntVar(0, total_records, "pmax") pmin = solver.IntVar(0, total_records, "pmin") solver.Maximize(solver.Sum([(V[i][i] * x[i, i]) + solver.Sum([(V[i][j] - V[i][j+1]) * x[i, j] for j in range(i)]) for i in range(n)]) - regularization * (pmax - pmin)) else: solver.Maximize(solver.Sum([(V[i][i] * x[i, i]) + solver.Sum([(V[i][j] - V[i][j+1]) * x[i, j] for j in range(i)]) for i in range(n)])) # Constraint: unique assignment self.add_constraint_unique_assignment(solver, n, x) # Constraint: continuity self.add_constraint_continuity(solver, n, x) # Constraint: min / max bins self.add_constraint_min_max_bins(solver, n, x, d) # Constraint: min / max bin size self.add_constraint_min_max_bin_size(solver, n, x, u, n_records, bin_size_diff) # Constraint: min / max n_nonevent per bin if (self.min_bin_n_nonevent is not None or self.max_bin_n_nonevent is not None): for i in range(n): bin_ne_size = solver.Sum([n_nonevent[j] * x[i, j] for j in range(i + 1)]) if self.min_bin_n_nonevent is not None: solver.Add(bin_ne_size >= self.min_bin_n_nonevent*x[i, i]) if self.max_bin_n_nonevent is not None: solver.Add(bin_ne_size <= self.max_bin_n_nonevent*x[i, i]) # Constraint: min / max n_event per bin if (self.min_bin_n_event is not None or self.max_bin_n_event is not None): for i in range(n): bin_e_size = solver.Sum([n_event[j] * x[i, j] for j in range(i + 1)]) if self.min_bin_n_event is not None: solver.Add(bin_e_size >= self.min_bin_n_event * x[i, i]) if self.max_bin_n_event is not None: solver.Add(bin_e_size <= self.max_bin_n_event * x[i, i]) # Constraints: monotonicity if self.monotonic_trend == "ascending": self.add_constraint_monotonic_ascending(solver, n, D, x) elif self.monotonic_trend == "descending": 
self.add_constraint_monotonic_descending(solver, n, D, x) elif self.monotonic_trend == "concave": self.add_constraint_monotonic_concave(solver, n, D, x) elif self.monotonic_trend == "convex": self.add_constraint_monotonic_convex(solver, n, D, x) elif self.monotonic_trend in ("peak", "valley"): for i in range(n): solver.Add(t >= i - n * (1 - y[i])) solver.Add(t <= i + n * y[i]) if self.monotonic_trend == "peak": self.add_constraint_monotonic_peak(solver, n, D, x, y) else: self.add_constraint_monotonic_valley(solver, n, D, x, y) elif self.monotonic_trend == "peak_heuristic": self.add_constraint_monotonic_peak_heuristic( solver, n, D, x, trend_change) elif self.monotonic_trend == "valley_heuristic": self.add_constraint_monotonic_valley_heuristic( solver, n, D, x, trend_change) # Constraint: reduction of dominating bins if self.gamma: for i in range(n): bin_size = solver.Sum([n_records[j] * x[i, j] for j in range(i + 1)]) solver.Add(pmin <= total_records * (1 - x[i, i]) + bin_size) solver.Add(pmax >= bin_size) solver.Add(pmin <= pmax) # Constraint: max-pvalue self.add_max_pvalue_constraint(solver, x, pvalue_violation_indices) # Constraint: fixed splits self.add_constraint_fixed_splits(solver, n, x) self.solver_ = solver self._n = n self._x = x def solve(self): self.solver_.SetTimeLimit(self.time_limit * 1000) status = self.solver_.Solve() if status in (pywraplp.Solver.OPTIMAL, pywraplp.Solver.FEASIBLE): if status == pywraplp.Solver.OPTIMAL: status_name = "OPTIMAL" else: status_name = "FEASIBLE" solution = np.array([self._x[i, i].solution_value() for i in range(self._n)]).astype(bool) else: if status == pywraplp.Solver.ABNORMAL: status_name = "ABNORMAL" elif status == pywraplp.Solver.INFEASIBLE: status_name = "INFEASIBLE" elif status == pywraplp.Solver.UNBOUNDED: status_name = "UNBOUNDED" else: status_name = "UNKNOWN" solution = np.zeros(self._n).astype(bool) solution[-1] = True return status_name, solution def decision_variables(self, solver, n): x = {} for i in range(n): for j in range(i + 1): x[i, j] = solver.BoolVar("x[{}, {}]".format(i, j)) y = None t = None d = None u = None bin_size_diff = None if self.monotonic_trend in ("peak", "valley"): # Auxiliary binary variables y = {} for i in range(n): y[i] = solver.BoolVar("y[{}]".format(i)) # Change point t = solver.IntVar(0, n, "t") if self.min_n_bins is not None and self.max_n_bins is not None: n_bin_diff = self.max_n_bins - self.min_n_bins # Range constraints auxiliary variables d = solver.IntVar(0, n_bin_diff, "n_bin_diff") if self.min_bin_size is not None and self.max_bin_size is not None: bin_size_diff = self.max_bin_size - self.min_bin_size # Range constraints auxiliary variables u = {} for i in range(n): u[i] = solver.IntVar(0, bin_size_diff, "u[{}]".format(i)) return x, y, t, d, u, bin_size_diff def add_constraint_unique_assignment(self, solver, n, x): for j in range(n): solver.Add(solver.Sum([x[i, j] for i in range(j, n)]) == 1) def add_constraint_continuity(self, solver, n, x): for i in range(n): for j in range(i): solver.Add(x[i, j] - x[i, j+1] <= 0) def add_constraint_min_max_bins(self, solver, n, x, d): if self.min_n_bins is not None or self.max_n_bins is not None: trace = solver.Sum([x[i, i] for i in range(n)]) if self.min_n_bins is not None and self.max_n_bins is not None: solver.Add(d + trace - self.max_n_bins == 0) elif self.min_n_bins is not None: solver.Add(trace >= self.min_n_bins) elif self.max_n_bins is not None: solver.Add(trace <= self.max_n_bins) def add_constraint_min_max_bin_size(self, solver, n, x, u, n_records, 
bin_size_diff): if self.min_bin_size is not None or self.max_bin_size is not None: for i in range(n): bin_size = solver.Sum([n_records[j] * x[i, j] for j in range(i + 1)]) if (self.min_bin_size is not None and self.max_bin_size is not None): solver.Add(u[i] + bin_size - self.max_bin_size * x[i, i] == 0) solver.Add(u[i] <= bin_size_diff * x[i, i]) elif self.min_bin_size is not None: solver.Add(bin_size >= self.min_bin_size * x[i, i]) elif self.max_bin_size is not None: solver.Add(bin_size <= self.max_bin_size * x[i, i]) def add_constraint_monotonic_ascending(self, solver, n, D, x): for i in range(1, n): for z in range(i): solver.Add( solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) + D[z][z] * x[z, z] - 1 - (D[i][i] - 1) * x[i, i] - solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) + self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0) # Preprocessing if self.min_event_rate_diff == 0: for i in range(n - 1): if D[i+1][i] - D[i+1][i+1] > 0: solver.Add(x[i, i] == 0) for j in range(n - i - 1): if D[i+1+j][i] - D[i+1+j][i+1+j] > 0: solver.Add(x[i+j, i+j] == 0) def add_constraint_monotonic_descending(self, solver, n, D, x): for i in range(1, n): for z in range(i): solver.Add( solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) + D[i][i] * x[i, i] - 1 - (D[z][z] - 1) * x[z, z] - solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) + self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0) # Preprocessing if self.min_event_rate_diff == 0: for i in range(n - 1): if D[i+1][i] - D[i+1][i+1] < 0: solver.Add(x[i, i] == 0) for j in range(n - i - 1): if D[i+1+j][i] - D[i+1+j][i+1+j] < 0: solver.Add(x[i+j, i+j] == 0) def add_constraint_monotonic_concave(self, solver, n, D, x): for i in range(2, n): for j in range(1, i): for k in range(j): solver.Add( -(solver.Sum([(D[i][z] - D[i][z+1]) * x[i, z] for z in range(i)]) + D[i][i]*x[i, i]) + 2 * (solver.Sum([(D[j][z] - D[j][z+1]) * x[j, z] for z in range(j)]) + D[j][j] * x[j, j]) - (solver.Sum([(D[k][z] - D[k][z+1]) * x[k, z] for z in range(k)]) + D[k][k] * x[k, k]) >= ( x[i, i] + x[j, j] + x[k, k] - 3)) def add_constraint_monotonic_convex(self, solver, n, D, x): for i in range(2, n): for j in range(1, i): for k in range(j): solver.Add( (solver.Sum([(D[i][z] - D[i][z+1]) * x[i, z] for z in range(i)]) + D[i][i] * x[i, i]) - 2 * (solver.Sum([(D[j][z] - D[j][z+1]) * x[j, z] for z in range(j)]) + D[j][j] * x[j, j]) + (solver.Sum([(D[k][z] - D[k][z+1]) * x[k, z] for z in range(k)]) + D[k][k] * x[k, k]) >= ( x[i, i] + x[j, j] + x[k, k] - 3)) def add_constraint_monotonic_peak(self, solver, n, D, x, y): for i in range(1, n): for z in range(i): solver.Add( y[i] + y[z] + 1 + (D[z][z] - 1) * x[z, z] + solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) - solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) - D[i][i] * x[i, i] >= 0) solver.Add( 2 - y[i] - y[z] + 1 + (D[i][i] - 1) * x[i, i] + solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) - solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) - D[z][z] * x[z, z] >= 0) def add_constraint_monotonic_valley(self, solver, n, D, x, y): for i in range(1, n): for z in range(i): solver.Add( y[i] + y[z] + 1 + (D[i][i] - 1) * x[i, i] + solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) - solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) - D[z][z] * x[z, z] >= 0) solver.Add( 2 - y[i] - y[z] + 1 + (D[z][z] - 1) * x[z, z] + solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) - 
solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) - D[i][i] * x[i, i] >= 0) def add_constraint_monotonic_peak_heuristic(self, solver, n, D, x, tc): for i in range(1, tc): for z in range(i): solver.Add( solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) + D[z][z] * x[z, z] - 1 - (D[i][i] - 1) * x[i, i] - solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) + self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0) # Preprocessing if self.min_event_rate_diff == 0: for i in range(tc - 1): if D[i+1][i] - D[i+1][i+1] > 0: solver.Add(x[i, i] == 0) for j in range(tc - i - 1): if D[i+1+j][i] - D[i+1+j][i+1+j] > 0: solver.Add(x[i+j, i+j] == 0) for i in range(tc, n): for z in range(tc, i): solver.Add( solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) + D[i][i] * x[i, i] - 1 - (D[z][z] - 1) * x[z, z] - solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) + self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0) # Preprocessing if self.min_event_rate_diff == 0: for i in range(tc, n - 1): if D[i+1][i] - D[i+1][i+1] < 0: solver.Add(x[i, i] == 0) for j in range(tc, n - i - 1): if D[i+1+j][i] - D[i+1+j][i+1+j] < 0: solver.Add(x[i+j, i+j] == 0) def add_constraint_monotonic_valley_heuristic(self, solver, n, D, x, tc): for i in range(1, tc): for z in range(i): solver.Add( solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) + D[i][i] * x[i, i] - 1 - (D[z][z] - 1) * x[z, z] - solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) + self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0) # Preprocessing if self.min_event_rate_diff == 0: for i in range(tc - 1): if D[i+1][i] - D[i+1][i+1] < 0: solver.Add(x[i, i] == 0) for j in range(tc - i - 1): if D[i+1+j][i] - D[i+1+j][i+1+j] < 0: solver.Add(x[i+j, i+j] == 0) for i in range(tc, n): for z in range(tc, i): solver.Add( solver.Sum([(D[z][j] - D[z][j+1]) * x[z, j] for j in range(z)]) + D[z][z] * x[z, z] - 1 - (D[i][i] - 1) * x[i, i] - solver.Sum([(D[i][j] - D[i][j + 1]) * x[i, j] for j in range(i)]) + self.min_event_rate_diff * (x[i, i] + x[z, z] - 1) <= 0) # Preprocessing if self.min_event_rate_diff == 0: for i in range(tc, n - 1): if D[i+1][i] - D[i+1][i+1] > 0: solver.Add(x[i, i] == 0) for j in range(tc, n - i - 1): if D[i+1+j][i] - D[i+1+j][i+1+j] > 0: solver.Add(x[i+j, i+j] == 0) def add_max_pvalue_constraint(self, solver, x, pvalue_violation_indices): for ind1, ind2 in pvalue_violation_indices: solver.Add(x[ind1[0], ind1[1]] + x[ind2[0], ind2[1]] <= 1) def add_constraint_fixed_splits(self, solver, n, x): if self.user_splits_fixed is not None: for i in range(n - 1): if self.user_splits_fixed[i]: solver.Add(x[i, i] == 1) ``` #### File: binning/multidimensional/binning_2d.py ```python import numbers import time import numpy as np from joblib import effective_n_jobs from sklearn.tree import DecisionTreeClassifier from ...information import solver_statistics from ...logging import Logger from ..binning import OptimalBinning from ..binning_statistics import target_info from ..prebinning import PreBinning from .binning_statistics_2d import BinningTable2D from .cp_2d import Binning2DCP from .mip_2d import Binning2DMIP from .model_data_2d import model_data from .model_data_cart_2d import model_data_cart from .preprocessing_2d import split_data_2d from .transformations_2d import transform_binary_target logger = Logger(__name__).logger def _check_parameters(name_x, name_y, dtype_x, dtype_y, prebinning_method, strategy, solver, divergence, max_n_prebins_x, max_n_prebins_y, 
min_prebin_size_x, min_prebin_size_y, min_n_bins, max_n_bins, min_bin_size, max_bin_size, min_bin_n_nonevent, max_bin_n_nonevent, min_bin_n_event, max_bin_n_event, monotonic_trend_x, monotonic_trend_y, min_event_rate_diff_x, min_event_rate_diff_y, gamma, special_codes_x, special_codes_y, split_digits, n_jobs, time_limit, verbose): if not isinstance(name_x, str): raise TypeError("name_x must be a string.") if not isinstance(name_y, str): raise TypeError("name_y must be a string.") if dtype_x not in ("numerical",): raise ValueError('Invalid value for dtype_x. Allowed string ' 'values is "numerical".') if dtype_y not in ("numerical",): raise ValueError('Invalid value for dtype_y. Allowed string ' 'values is "numerical".') if prebinning_method not in ("cart", "mdlp", "quantile", "uniform"): raise ValueError('Invalid value for prebinning_method. Allowed string ' 'values are "cart", "mdlp", "quantile" ' 'and "uniform".') if strategy not in ("grid", "cart"): raise ValueError('Invalid value for strategy. Allowed string ' 'values are "grid" and "cart".') if solver not in ("cp", "ls", "mip"): raise ValueError('Invalid value for solver. Allowed string ' 'values are "cp", "ls" and "mip".') if divergence not in ("iv", "js", "hellinger", "triangular"): raise ValueError('Invalid value for divergence. Allowed string ' 'values are "iv", "js", "helliger" and "triangular".') if (not isinstance(max_n_prebins_x, numbers.Integral) or max_n_prebins_x <= 1): raise ValueError("max_prebins_x must be an integer greater than 1; " "got {}.".format(max_n_prebins_x)) if (not isinstance(max_n_prebins_y, numbers.Integral) or max_n_prebins_y <= 1): raise ValueError("max_prebins_y must be an integer greater than 1; " "got {}.".format(max_n_prebins_y)) if not 0. < min_prebin_size_x <= 0.5: raise ValueError("min_prebin_size_x must be in (0, 0.5]; got {}." .format(min_prebin_size_x)) if not 0. < min_prebin_size_y <= 0.5: raise ValueError("min_prebin_size_y must be in (0, 0.5]; got {}." .format(min_prebin_size_y)) if min_n_bins is not None: if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0: raise ValueError("min_n_bins must be a positive integer; got {}." .format(min_n_bins)) if max_n_bins is not None: if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0: raise ValueError("max_n_bins must be a positive integer; got {}." .format(max_n_bins)) if min_n_bins is not None and max_n_bins is not None: if min_n_bins > max_n_bins: raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}." .format(min_n_bins, max_n_bins)) if min_bin_size is not None: if (not isinstance(min_bin_size, numbers.Number) or not 0. < min_bin_size <= 0.5): raise ValueError("min_bin_size must be in (0, 0.5]; got {}." .format(min_bin_size)) if max_bin_size is not None: if (not isinstance(max_bin_size, numbers.Number) or not 0. < max_bin_size <= 1.0): raise ValueError("max_bin_size must be in (0, 1.0]; got {}." 
.format(max_bin_size)) if min_bin_size is not None and max_bin_size is not None: if min_bin_size > max_bin_size: raise ValueError("min_bin_size must be <= max_bin_size; " "got {} <= {}.".format(min_bin_size, max_bin_size)) if min_bin_n_nonevent is not None: if (not isinstance(min_bin_n_nonevent, numbers.Integral) or min_bin_n_nonevent <= 0): raise ValueError("min_bin_n_nonevent must be a positive integer; " "got {}.".format(min_bin_n_nonevent)) if max_bin_n_nonevent is not None: if (not isinstance(max_bin_n_nonevent, numbers.Integral) or max_bin_n_nonevent <= 0): raise ValueError("max_bin_n_nonevent must be a positive integer; " "got {}.".format(max_bin_n_nonevent)) if min_bin_n_nonevent is not None and max_bin_n_nonevent is not None: if min_bin_n_nonevent > max_bin_n_nonevent: raise ValueError("min_bin_n_nonevent must be <= " "max_bin_n_nonevent; got {} <= {}." .format(min_bin_n_nonevent, max_bin_n_nonevent)) if min_bin_n_event is not None: if (not isinstance(min_bin_n_event, numbers.Integral) or min_bin_n_event <= 0): raise ValueError("min_bin_n_event must be a positive integer; " "got {}.".format(min_bin_n_event)) if max_bin_n_event is not None: if (not isinstance(max_bin_n_event, numbers.Integral) or max_bin_n_event <= 0): raise ValueError("max_bin_n_event must be a positive integer; " "got {}.".format(max_bin_n_event)) if min_bin_n_event is not None and max_bin_n_event is not None: if min_bin_n_event > max_bin_n_event: raise ValueError("min_bin_n_event must be <= " "max_bin_n_event; got {} <= {}." .format(min_bin_n_event, max_bin_n_event)) if monotonic_trend_x is not None: if monotonic_trend_x not in ("ascending", "descending"): raise ValueError('Invalid value for monotonic trend x. Allowed ' 'string values are "ascending" and "descending".') if monotonic_trend_y is not None: if monotonic_trend_y not in ("ascending", "descending"): raise ValueError('Invalid value for monotonic trend y. Allowed ' 'string values are "ascending" and "descending".') if (not isinstance(min_event_rate_diff_x, numbers.Number) or not 0. <= min_event_rate_diff_x <= 1.0): raise ValueError("min_event_rate_diff_x must be in [0, 1]; got {}." .format(min_event_rate_diff_x)) if (not isinstance(min_event_rate_diff_y, numbers.Number) or not 0. <= min_event_rate_diff_y <= 1.0): raise ValueError("min_event_rate_diff_y must be in [0, 1]; got {}." .format(min_event_rate_diff_y)) if not isinstance(gamma, numbers.Number) or gamma < 0: raise ValueError("gamma must be >= 0; got {}.".format(gamma)) if special_codes_x is not None: if not isinstance(special_codes_x, (np.ndarray, list)): raise TypeError("special_codes_x must be a list or numpy.ndarray.") if special_codes_y is not None: if not isinstance(special_codes_y, (np.ndarray, list)): raise TypeError("special_codes_y must be a list or numpy.ndarray.") if split_digits is not None: if (not isinstance(split_digits, numbers.Integral) or not 0 <= split_digits <= 8): raise ValueError("split_digits must be an integer in [0, 8]; " "got {}.".format(split_digits)) if n_jobs is not None: if not isinstance(n_jobs, numbers.Integral): raise ValueError("n_jobs must be an integer or None; got {}." 
.format(n_jobs)) if not isinstance(time_limit, numbers.Number) or time_limit < 0: raise ValueError("time_limit must be a positive value in seconds; " "got {}.".format(time_limit)) if not isinstance(verbose, bool): raise TypeError("verbose must be a boolean; got {}.".format(verbose)) class OptimalBinning2D(OptimalBinning): """Optimal binning of two numerical variables with respect to a binary target. Parameters ---------- name_x : str, optional (default="") The name of variable x. name_y : str, optional (default="") The name of variable y. dtype_x : str, optional (default="numerical") The data type of variable x. Supported data type is "numerical" for continuous and ordinal variables. dtype_y : str, optional (default="numerical") The data type of variable y. Supported data type is "numerical" for continuous and ordinal variables. prebinning_method : str, optional (default="cart") The pre-binning method. Supported methods are "cart" for a CART decision tree, "mdlp" for Minimum Description Length Principle (MDLP), "quantile" to generate prebins with approximately same frequency and "uniform" to generate prebins with equal width. Method "cart" uses `sklearn.tree.DecistionTreeClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.tree. DecisionTreeClassifier.html>`_. strategy: str, optional (default="grid") The strategy used to create the initial prebinning 2D after computing prebinning splits on the x and y axis. The strategy "grid" creates a prebinning 2D with n_prebins_x times n_prebins_y elements. The strategy "cart" (experimental) reduces the number of elements by pruning. The latter is recommended when the number of prebins is large. solver : str, optional (default="cp") The optimizer to solve the optimal binning problem. Supported solvers are "mip" to choose a mixed-integer programming solver, and "cp" to choose a constrained programming solver. divergence : str, optional (default="iv") The divergence measure in the objective function to be maximized. Supported divergences are "iv" (Information Value or Jeffrey's divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence) and "triangular" (triangular discrimination). max_n_prebins_x : int (default=5) The maximum number of bins on variable x after pre-binning (prebins). max_n_prebins_y : int (default=5) The maximum number of bins on variable y after pre-binning (prebins). min_prebin_size_x : float (default=0.05) The fraction of mininum number of records for each prebin on variable x. min_prebin_size_y : float (default=0.05) The fraction of mininum number of records for each prebin on variable y. min_n_bins : int or None, optional (default=None) The minimum number of bins. If None, then ``min_n_bins`` is a value in ``[0, max_n_prebins]``. max_n_bins : int or None, optional (default=None) The maximum number of bins. If None, then ``max_n_bins`` is a value in ``[0, max_n_prebins]``. min_bin_size : float or None, optional (default=None) The fraction of minimum number of records for each bin. If None, ``min_bin_size = min_prebin_size``. max_bin_size : float or None, optional (default=None) The fraction of maximum number of records for each bin. If None, ``max_bin_size = 1.0``. min_bin_n_nonevent : int or None, optional (default=None) The minimum number of non-event records for each bin. If None, ``min_bin_n_nonevent = 1``. max_bin_n_nonevent : int or None, optional (default=None) The maximum number of non-event records for each bin. If None, then an unlimited number of non-event records for each bin. 
min_bin_n_event : int or None, optional (default=None) The minimum number of event records for each bin. If None, ``min_bin_n_event = 1``. max_bin_n_event : int or None, optional (default=None) The maximum number of event records for each bin. If None, then an unlimited number of event records for each bin. monotonic_trend_x : str or None, optional (default=None) The **event rate** monotonic trend on the x axis. Supported trends are “ascending”, and "descending". If None, then the monotonic constraint is disabled. monotonic_trend_y : str or None, optional (default=None) The **event rate** monotonic trend on the y axis. Supported trends are “ascending”, and "descending". If None, then the monotonic constraint is disabled. min_event_rate_diff_x : float, optional (default=0) The minimum event rate difference between consecutives bins on the x axis. min_event_rate_diff_y : float, optional (default=0) The minimum event rate difference between consecutives bins on the y axis. gamma : float, optional (default=0) Regularization strength to reduce the number of dominating bins. Larger values specify stronger regularization. special_codes_x : array-like or None, optional (default=None) List of special codes for the variable x. Use special codes to specify the data values that must be treated separately. special_codes_y : array-like or None, optional (default=None) List of special codes for the variable y. Use special codes to specify the data values that must be treated separately. split_digits : int or None, optional (default=None) The significant digits of the split points. If ``split_digits`` is set to 0, the split points are integers. If None, then all significant digits in the split points are considered. n_jobs : int or None, optional (default=None) Number of cores to run in parallel while binning variables. ``None`` means 1 core. ``-1`` means using all processors. time_limit : int (default=100) The maximum time in seconds to run the optimization solver. verbose : bool (default=False) Enable verbose output. 
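    Examples
    --------
    A minimal usage sketch. It assumes the class is exposed at the package
    top level and that ``x1``, ``x2`` and ``target`` are one-dimensional
    arrays of the same length, with ``target`` binary (all names are
    illustrative):

    >>> from optbinning import OptimalBinning2D
    >>> optb = OptimalBinning2D(name_x="x1", name_y="x2", solver="cp")
    >>> optb.fit(x1, x2, target)
    >>> x_woe = optb.transform(x1, x2, metric="woe")
    >>> optb.binning_table.build()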
""" def __init__(self, name_x="", name_y="", dtype_x="numerical", dtype_y="numerical", prebinning_method="cart", strategy="grid", solver="cp", divergence="iv", max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05, min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend_x=None, monotonic_trend_y=None, min_event_rate_diff_x=0, min_event_rate_diff_y=0, gamma=0, special_codes_x=None, special_codes_y=None, split_digits=None, n_jobs=1, time_limit=100, verbose=False): self.name_x = name_x self.name_y = name_y self.dtype_x = dtype_x self.dtype_y = dtype_y self.prebinning_method = prebinning_method self.strategy = strategy self.solver = solver self.divergence = divergence self.max_n_prebins_x = max_n_prebins_x self.max_n_prebins_y = max_n_prebins_y self.min_prebin_size_x = min_prebin_size_x self.min_prebin_size_y = min_prebin_size_y self.min_n_bins = min_n_bins self.max_n_bins = max_n_bins self.min_bin_size = min_bin_size self.max_bin_size = max_bin_size self.min_bin_n_event = min_bin_n_event self.max_bin_n_event = max_bin_n_event self.min_bin_n_nonevent = min_bin_n_nonevent self.max_bin_n_nonevent = max_bin_n_nonevent self.monotonic_trend_x = monotonic_trend_x self.monotonic_trend_y = monotonic_trend_y self.min_event_rate_diff_x = min_event_rate_diff_x self.min_event_rate_diff_y = min_event_rate_diff_y self.gamma = gamma self.special_codes_x = special_codes_x self.special_codes_y = special_codes_y self.split_digits = split_digits self.n_jobs = n_jobs self.time_limit = time_limit self.verbose = verbose # auxiliary self._n_event = None self._n_nonevent = None self._n_event_special = None self._n_nonevent_special = None self._n_event_missing = None self._n_nonevent_missing = None self._problem_type = "classification" # info self._binning_table = None self._n_prebins = None self._n_refinements = 0 self._n_samples = None self._optimizer = None self._solution = None self._splits_x_optimal = None self._splits_y_optimal = None self._status = None # timing self._time_total = None self._time_preprocessing = None self._time_prebinning = None self._time_solver = None self._time_optimizer = None self._time_postprocessing = None self._is_fitted = False def fit(self, x, y, z, check_input=False): """Fit the optimal binning 2D according to the given training data. Parameters ---------- x : array-like, shape = (n_samples,) Training vector x, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Training vector y, where n_samples is the number of samples. z : array-like, shape = (n_samples,) Target vector relative to x and y. check_input : bool (default=False) Whether to check input arrays. Returns ------- self : OptimalBinning2D Fitted optimal binning 2D. """ return self._fit(x, y, z, check_input) def fit_transform(self, x, y, z, metric="woe", metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Fit the optimal binning 2D according to the given training data, then transform it. Parameters ---------- x : array-like, shape = (n_samples,) Training vector x, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Training vector y, where n_samples is the number of samples. z : array-like, shape = (n_samples,) Target vector relative to x and y. metric : str (default="woe") The metric used to transform the input vector. 
Supported metrics are "woe" to choose the Weight of Evidence, "event_rate" to choose the event rate, "indices" to assign the corresponding indices of the bins and "bins" to assign the corresponding bin interval. metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate, and any numerical value. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate and any numerical value. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- z_new : numpy array, shape = (n_samples,) Transformed array. """ return self.fit(x, y, z, check_input).transform( x, y, metric, metric_special, metric_missing, show_digits, check_input) def transform(self, x, y, metric="woe", metric_special=0, metric_missing=0, show_digits=2, check_input=False): """Transform given data to Weight of Evidence (WoE) or event rate using bins from the fitted optimal binning 2D. Parameters ---------- x : array-like, shape = (n_samples,) Training vector x, where n_samples is the number of samples. y : array-like, shape = (n_samples,) Training vector y, where n_samples is the number of samples. metric : str (default="woe") The metric used to transform the input vector. Supported metrics are "woe" to choose the Weight of Evidence, "event_rate" to choose the event rate, "indices" to assign the corresponding indices of the bins and "bins" to assign the corresponding bin interval. metric_special : float or str (default=0) The metric value to transform special codes in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate and any numerical value. metric_missing : float or str (default=0) The metric value to transform missing values in the input vector. Supported metrics are "empirical" to use the empirical WoE or event rate and any numerical value. show_digits : int, optional (default=2) The number of significant digits of the bin column. Applies when ``metric="bins"``. check_input : bool (default=False) Whether to check input arrays. Returns ------- z_new : numpy array, shape = (n_samples,) Transformed array. 
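        Examples
        --------
        A short sketch, assuming ``optb`` is a fitted ``OptimalBinning2D``
        instance and ``x1``, ``x2`` are the original feature arrays (names
        are illustrative):

        >>> z_event_rate = optb.transform(x1, x2, metric="event_rate")
        >>> z_bins = optb.transform(x1, x2, metric="bins", show_digits=3)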
""" self._check_is_fitted() return transform_binary_target( self._splits_x_optimal, self._splits_y_optimal, x, y, self._n_nonevent, self._n_event, self.special_codes_x, self.special_codes_y, metric, metric_special, metric_missing, show_digits, check_input) def _fit(self, x, y, z, check_input): time_init = time.perf_counter() if self.verbose: logger.info("Optimal binning started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params()) # Pre-processing if self.verbose: logger.info("Pre-processing started.") self._n_samples = len(x) if self.verbose: logger.info("Pre-processing: number of samples: {}" .format(self._n_samples)) time_preprocessing = time.perf_counter() [x_clean, y_clean, z_clean, x_missing, y_missing, z_missing, x_special, y_special, z_special] = split_data_2d( self.dtype_x, self.dtype_y, x, y, z, self.special_codes_x, self.special_codes_y, check_input) self._time_preprocessing = time.perf_counter() - time_preprocessing if self.verbose: n_clean = len(x_clean) n_missing = len(x_missing) n_special = len(x_special) logger.info("Pre-processing: number of clean samples: {}" .format(n_clean)) logger.info("Pre-processing: number of missing samples: {}" .format(n_missing)) logger.info("Pre-processing: number of special samples: {}" .format(n_special)) if self.verbose: logger.info("Pre-processing terminated. Time: {:.4f}s" .format(self._time_preprocessing)) # Pre-binning if self.verbose: logger.info("Pre-binning started.") time_prebinning = time.perf_counter() splits_x = self._fit_prebinning(self.dtype_x, x_clean, z_clean, self.max_n_prebins_x, self.min_prebin_size_x) splits_y = self._fit_prebinning(self.dtype_y, y_clean, z_clean, self.max_n_prebins_y, self.min_prebin_size_y) NE, E = self._prebinning_matrices( splits_x, splits_y, x_clean, y_clean, z_clean, x_missing, y_missing, z_missing, x_special, y_special, z_special) if self.strategy == "cart": if self.verbose: logger.info("Prebinning: applying strategy cart...") n_splits_x = len(splits_x) n_splits_y = len(splits_y) clf_nodes = n_splits_x * n_splits_y indices_x = np.digitize(x_clean, splits_x, right=False) n_bins_x = n_splits_x + 1 indices_y = np.digitize(y_clean, splits_y, right=False) n_bins_y = n_splits_y + 1 xt = np.empty(len(x_clean), dtype=int) yt = np.empty(len(y_clean), dtype=int) for i in range(n_bins_x): xt[(indices_x == i)] = i for i in range(n_bins_y): yt[(indices_y == i)] = i xyt = np.c_[xt, yt] min_prebin_size = min(self.min_prebin_size_x, self.min_prebin_size_y) * 0.25 clf = DecisionTreeClassifier(min_samples_leaf=min_prebin_size, max_leaf_nodes=clf_nodes) clf.fit(xyt, z_clean) self._clf = clf self._time_prebinning = time.perf_counter() - time_prebinning self._n_prebins = E.size if self.verbose: logger.info("Pre-binning: number of prebins: {}" .format(self._n_prebins)) logger.info("Pre-binning terminated. 
Time: {:.4f}s" .format(self._time_prebinning)) # Optimization rows, n_nonevent, n_event = self._fit_optimizer( splits_x, splits_y, NE, E) # Post-processing if self.verbose: logger.info("Post-processing started.") logger.info("Post-processing: compute binning information.") time_postprocessing = time.perf_counter() # Refinements m, n = E.shape self._n_refinements = (m * n * (m + 1) * (n + 1)) // 4 - len(rows) # solution matrices D = np.empty(m * n, dtype=float) P = np.empty(m * n, dtype=int) selected_rows = np.array(rows, dtype=object)[self._solution] self._selected_rows = selected_rows self._m, self._n = m, n n_selected_rows = selected_rows.shape[0] + 2 opt_n_nonevent = np.empty(n_selected_rows, dtype=int) opt_n_event = np.empty(n_selected_rows, dtype=int) for i, r in enumerate(selected_rows): _n_nonevent = n_nonevent[self._solution][i] _n_event = n_event[self._solution][i] _event_rate = _n_event / (_n_event + _n_nonevent) P[r] = i D[r] = _event_rate opt_n_nonevent[i] = _n_nonevent opt_n_event[i] = _n_event opt_n_nonevent[-2] = self._n_nonevent_special opt_n_event[-2] = self._n_event_special opt_n_nonevent[-1] = self._n_nonevent_missing opt_n_event[-1] = self._n_event_missing self._n_nonevent = opt_n_nonevent self._n_event = opt_n_event D = D.reshape((m, n)) P = P.reshape((m, n)) # optimal bins bins_x = np.concatenate([[-np.inf], splits_x, [np.inf]]) bins_y = np.concatenate([[-np.inf], splits_y, [np.inf]]) bins_str_x = np.array([[bins_x[i], bins_x[i+1]] for i in range(len(bins_x) - 1)]) bins_str_y = np.array([[bins_y[i], bins_y[i+1]] for i in range(len(bins_y) - 1)]) splits_x_optimal = [] splits_y_optimal = [] for i in range(len(selected_rows)): pos_y, pos_x = np.where(P == i) mask_x = np.arange(pos_x.min(), pos_x.max() + 1) mask_y = np.arange(pos_y.min(), pos_y.max() + 1) bin_x = bins_str_x[mask_x] bin_y = bins_str_y[mask_y] splits_x_optimal.append([bin_x[0][0], bin_x[-1][1]]) splits_y_optimal.append([bin_y[0][0], bin_y[-1][1]]) self._splits_x_optimal = splits_x_optimal self._splits_y_optimal = splits_y_optimal self._binning_table = BinningTable2D( self.name_x, self.name_y, self.dtype_x, self.dtype_y, splits_x_optimal, splits_y_optimal, m, n, opt_n_nonevent, opt_n_event, D, P) self.name = "-".join((self.name_x, self.name_y)) self._time_postprocessing = time.perf_counter() - time_postprocessing if self.verbose: logger.info("Post-processing terminated. Time: {:.4f}s" .format(self._time_postprocessing)) self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Optimal binning terminated. Status: {}. 
Time: {:.4f}s" .format(self._status, self._time_total)) # Completed successfully self._is_fitted = True return self def _fit_prebinning(self, dtype, x, z, max_n_prebins, min_prebin_size): # Pre-binning algorithm min_bin_size = int(np.ceil(min_prebin_size * self._n_samples)) prebinning = PreBinning(method=self.prebinning_method, n_bins=max_n_prebins, min_bin_size=min_bin_size, problem_type=self._problem_type).fit(x, z) return prebinning.splits def _prebinning_matrices(self, splits_x, splits_y, x_clean, y_clean, z_clean, x_missing, y_missing, z_missing, x_special, y_special, z_special): z0 = z_clean == 0 z1 = ~z0 # Compute n_nonevent and n_event for special and missing special_target_info = target_info(z_special) self._n_nonevent_special = special_target_info[0] self._n_event_special = special_target_info[1] missing_target_info = target_info(z_missing) self._n_nonevent_missing = missing_target_info[0] self._n_event_missing = missing_target_info[1] n_splits_x = len(splits_x) n_splits_y = len(splits_y) indices_x = np.digitize(x_clean, splits_x, right=False) n_bins_x = n_splits_x + 1 indices_y = np.digitize(y_clean, splits_y, right=False) n_bins_y = n_splits_y + 1 E = np.empty((n_bins_y, n_bins_x), dtype=int) NE = np.empty((n_bins_y, n_bins_x), dtype=int) for i in range(n_bins_y): mask_y = (indices_y == i) for j in range(n_bins_x): mask_x = (indices_x == j) mask = mask_x & mask_y NE[i, j] = np.count_nonzero(z0 & mask) E[i, j] = np.count_nonzero(z1 & mask) return NE, E def _fit_optimizer(self, splits_x, splits_y, NE, E): if self.verbose: logger.info("Optimizer started.") time_init = time.perf_counter() # Min/max number of bins (bin size) if self.min_bin_size is not None: min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples)) else: min_bin_size = self.min_bin_size if self.max_bin_size is not None: max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples)) else: max_bin_size = self.max_bin_size # Number of threads n_jobs = effective_n_jobs(self.n_jobs) if self.verbose: logger.info("Optimizer: {} jobs.".format(n_jobs)) if self.monotonic_trend_x is None: logger.info( "Optimizer: monotonic trend x not set.") else: logger.info("Optimizer: monotonic trend x set to {}." .format(self.monotonic_trend_x)) if self.monotonic_trend_y is None: logger.info( "Optimizer: monotonic trend y not set.") else: logger.info("Optimizer: monotonic trend y set to {}." 
.format(self.monotonic_trend_x)) if self.solver == "cp": scale = int(1e6) optimizer = Binning2DCP( self.monotonic_trend_x, self.monotonic_trend_y, self.min_n_bins, self.max_n_bins, self.min_event_rate_diff_x, self.min_event_rate_diff_y, self.gamma, n_jobs, self.time_limit) elif self.solver == "mip": scale = None optimizer = Binning2DMIP( self.monotonic_trend_x, self.monotonic_trend_y, self.min_n_bins, self.max_n_bins, self.min_event_rate_diff_x, self.min_event_rate_diff_y, self.gamma, n_jobs, self.time_limit) if self.verbose: logger.info("Optimizer: model data...") time_model_data = time.perf_counter() if self.strategy == "cart": [n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y, event_rate, n_event, n_nonevent, n_records] = model_data_cart( self._clf, self.divergence, NE, E, self.monotonic_trend_x, self.monotonic_trend_y, scale, min_bin_size, max_bin_size, self.min_bin_n_event, self.max_bin_n_event, self.min_bin_n_nonevent, self.max_bin_n_nonevent) else: [n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y, event_rate, n_event, n_nonevent, n_records] = model_data( self.divergence, NE, E, self.monotonic_trend_x, self.monotonic_trend_y, scale, min_bin_size, max_bin_size, self.min_bin_n_event, self.max_bin_n_event, self.min_bin_n_nonevent, self.max_bin_n_nonevent) self._time_model_data = time.perf_counter() - time_model_data if self.verbose: logger.info("Optimizer: model data terminated. Time {:.4f}s" .format(self._time_model_data)) if self.verbose: logger.info("Optimizer: build model...") optimizer.build_model(n_grid, n_rectangles, cols, c, d_connected_x, d_connected_y, event_rate, n_records) if self.verbose: logger.info("Optimizer: solve...") status, solution = optimizer.solve() self._solution = solution self._optimizer, self._time_optimizer = solver_statistics( self.solver, optimizer.solver_) self._status = status self._time_solver = time.perf_counter() - time_init if self.verbose: logger.info("Optimizer terminated. Time: {:.4f}s" .format(self._time_solver)) self._cols = cols self._rows = rows self._c = c return rows, n_nonevent, n_event @property def splits(self): """List of optimal split points and bins for axis x and y. 
Returns ------- splits : (numpy.ndarray, numpy.ndarray) """ self._check_is_fitted() return (self._splits_x_optimal, self._splits_y_optimal) ``` #### File: binning/multidimensional/binning_statistics_2d.py ```python import matplotlib.pyplot as plt import numpy as np import pandas as pd from mpl_toolkits.axes_grid1 import make_axes_locatable from ...formatting import dataframe_to_string from ..binning_statistics import _check_build_parameters from ..binning_statistics import _check_is_built from ..binning_statistics import BinningTable from ..metrics import bayesian_probability from ..metrics import binning_quality_score from ..metrics import chi2_cramer_v from ..metrics import frequentist_pvalue from ..metrics import hhi from ..metrics import gini from ..metrics import hellinger from ..metrics import jeffrey from ..metrics import jensen_shannon from ..metrics import triangular def _bin_fmt(bin, show_digits): if np.isinf(bin[0]): return "({0:.{2}f}, {1:.{2}f})".format(bin[0], bin[1], show_digits) else: return "[{0:.{2}f}, {1:.{2}f})".format(bin[0], bin[1], show_digits) def bin_xy_str_format(bins_x, bins_y, show_digits): show_digits = 2 if show_digits is None else show_digits bins_xy = [] for bx, by in zip(bins_x, bins_y): _bx = _bin_fmt(bx, show_digits) _by = _bin_fmt(by, show_digits) bins_xy.append(r"{} $\cup$ {}".format(_bx, _by)) return bins_xy def bin_str_format(bins, show_digits): show_digits = 2 if show_digits is None else show_digits bin_str = [] for bin in bins: bin_str.append(_bin_fmt(bin, show_digits)) return bin_str class BinningTable2D(BinningTable): """Binning table to summarize optimal binning of two numerical variables with respect to a binary target. Parameters ---------- name_x : str, optional (default="") The name of variable x. name_y : str, optional (default="") The name of variable y. dtype_x : str, optional (default="numerical") The data type of variable x. Supported data type is "numerical" for continuous and ordinal variables. dtype_y : str, optional (default="numerical") The data type of variable y. Supported data type is "numerical" for continuous and ordinal variables. splits_x : numpy.ndarray List of split points for variable x. splits_y : numpy.ndarray List of split points for variable y. m : int Number of rows of the 2D array. n : int Number of columns of the 2D array. n_nonevent : numpy.ndarray Number of non-events. n_event : numpy.ndarray Number of events. D : numpy.ndarray Event rate 2D array. P : numpy-ndarray Records 2D array. Warning ------- This class is not intended to be instantiated by the user. It is preferable to use the class returned by the property ``binning_table`` available in all optimal binning classes. """ def __init__(self, name_x, name_y, dtype_x, dtype_y, splits_x, splits_y, m, n, n_nonevent, n_event, D, P): self.name_x = name_x self.name_y = name_y self.dtype_x = dtype_x self.dtype_y = dtype_y self.splits_x = splits_x self.splits_y = splits_y self.m = m self.n = n self.n_nonevent = n_nonevent self.n_event = n_event self.D = D self.P = P self._is_built = False self._is_analyzed = False def build(self, show_digits=2, show_bin_xy=False, add_totals=True): """Build the binning table. Parameters ---------- show_digits : int, optional (default=2) The number of significant digits of the bin column. show_bin_xy: bool (default=False) Whether to show a single bin column with x and y. add_totals : bool (default=True) Whether to add a last row with totals. 
Returns ------- binning_table : pandas.DataFrame """ _check_build_parameters(show_digits, add_totals) if not isinstance(show_bin_xy, bool): raise TypeError("show_bin_xy must be a boolean; got {}." .format(show_bin_xy)) n_nonevent = self.n_nonevent n_event = self.n_event n_records = n_event + n_nonevent t_n_nonevent = n_nonevent.sum() t_n_event = n_event.sum() t_n_records = t_n_nonevent + t_n_event t_event_rate = t_n_event / t_n_records p_records = n_records / t_n_records p_event = n_event / t_n_event p_nonevent = n_nonevent / t_n_nonevent mask = (n_event > 0) & (n_nonevent > 0) event_rate = np.zeros(len(n_records)) woe = np.zeros(len(n_records)) iv = np.zeros(len(n_records)) js = np.zeros(len(n_records)) # Compute weight of evidence and event rate event_rate[mask] = n_event[mask] / n_records[mask] constant = np.log(t_n_event / t_n_nonevent) woe[mask] = np.log(1 / event_rate[mask] - 1) + constant W = np.log(1 / self.D - 1) + constant # Compute Gini self._gini = gini(self.n_event, self.n_nonevent) # Compute divergence measures p_ev = p_event[mask] p_nev = p_nonevent[mask] iv[mask] = jeffrey(p_ev, p_nev, return_sum=False) js[mask] = jensen_shannon(p_ev, p_nev, return_sum=False) t_iv = iv.sum() t_js = js.sum() self._iv = t_iv self._js = t_js self._hellinger = hellinger(p_ev, p_nev, return_sum=True) self._triangular = triangular(p_ev, p_nev, return_sum=True) # Keep data for plotting self._n_records = n_records self._event_rate = event_rate self._woe = woe self._W = W # Compute KS self._ks = np.abs(p_event.cumsum() - p_nonevent.cumsum()).max() # Compute HHI self._hhi = hhi(p_records) self._hhi_norm = hhi(p_records, normalized=True) # Compute paths. This is required for both plot and analysis # paths x: horizontal self._paths_x = [] for i in range(self.m): path = tuple(dict.fromkeys(self.P[i, :])) if path not in self._paths_x: self._paths_x.append(path) # paths y: vertical self._paths_y = [] for j in range(self.n): path = tuple(dict.fromkeys(self.P[:, j])) if path not in self._paths_y: self._paths_y.append(path) if show_bin_xy: bin_xy_str = bin_xy_str_format(self.splits_x, self.splits_y, show_digits) bin_xy_str.extend(["Special", "Missing"]) df = pd.DataFrame({ "Bin": bin_xy_str, "Count": n_records, "Count (%)": p_records, "Non-event": n_nonevent, "Event": n_event, "Event rate": event_rate, "WoE": woe, "IV": iv, "JS": js }) else: bin_x_str = bin_str_format(self.splits_x, show_digits) bin_y_str = bin_str_format(self.splits_y, show_digits) bin_x_str.extend(["Special", "Missing"]) bin_y_str.extend(["Special", "Missing"]) df = pd.DataFrame({ "Bin x": bin_x_str, "Bin y": bin_y_str, "Count": n_records, "Count (%)": p_records, "Non-event": n_nonevent, "Event": n_event, "Event rate": event_rate, "WoE": woe, "IV": iv, "JS": js }) if add_totals: if show_bin_xy: totals = ["", t_n_records, 1, t_n_nonevent, t_n_event, t_event_rate, "", t_iv, t_js] else: totals = ["", "", t_n_records, 1, t_n_nonevent, t_n_event, t_event_rate, "", t_iv, t_js] df.loc["Totals"] = totals self._is_built = True return df def plot(self, metric="woe", savefig=None): """Plot the binning table. Visualize the non-event and event count, and the Weight of Evidence or the event rate for each bin. Parameters ---------- metric : str, optional (default="woe") Supported metrics are "woe" to show the Weight of Evidence (WoE) measure and "event_rate" to show the event rate. savefig : str or None (default=None) Path to save the plot figure. 
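        Examples
        --------
        A short sketch, assuming ``bt`` is the ``binning_table`` of a fitted
        ``OptimalBinning2D`` instance (the names are illustrative); note that
        ``build()`` must be called before plotting:

        >>> bt = optb.binning_table
        >>> bt.build()
        >>> bt.plot(metric="event_rate", savefig="binning_2d_event_rate.png")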
""" _check_is_built(self) if metric not in ("event_rate", "woe"): raise ValueError('Invalid value for metric. Allowed string ' 'values are "event_rate" and "woe".') if metric == "woe": metric_values = self._woe metric_matrix = self._W metric_label = "WoE" elif metric == "event_rate": metric_values = self._event_rate metric_matrix = self.D metric_label = "Event rate" fig, ax = plt.subplots(figsize=(7, 7)) divider = make_axes_locatable(ax) axtop = divider.append_axes("top", size=2.5, pad=0.1, sharex=ax) axright = divider.append_axes("right", size=2.5, pad=0.1, sharey=ax) # Hide x labels and tick labels for top plots and y ticks for # right plots. # Position [0, 0] for path in self._paths_x: er = sum([ [metric_values[p]] * np.count_nonzero( self.P == p, axis=1).max() for p in path], []) er = er + [er[-1]] axtop.step(np.arange(self.n + 1) - 0.5, er, label=path, where="post") for i in range(self.n): axtop.axvline(i + 0.5, color="grey", linestyle="--", alpha=0.5) axtop.get_xaxis().set_visible(False) axtop.set_ylabel(metric_label, fontsize=12) # Position [1, 0] pos = ax.matshow(metric_matrix, cmap=plt.cm.bwr) for j in range(self.n): for i in range(self.m): c = int(self.P[i, j]) ax.text(j, i, str(c), va='center', ha='center') fig.colorbar(pos, ax=ax, orientation="horizontal", fraction=0.025, pad=0.125) ax.xaxis.set_label_position("bottom") ax.xaxis.tick_bottom() ax.set_ylabel("Bin ID - y ({})".format(self.name_x), fontsize=12) ax.set_xlabel("Bin ID - x ({})".format(self.name_y), fontsize=12) # Position [1, 1] for path in self._paths_y: er = sum([ [metric_values[p]] * (np.count_nonzero( self.P == p, axis=0).max()) for p in path], []) er = er + [er[-1]] axright.step(er, np.arange(self.m + 1) - 0.5, label=path, where="pre") for j in range(self.m): axright.axhline(j - 0.5, color="grey", linestyle="--", alpha=0.5) axright.get_yaxis().set_visible(False) axright.set_xlabel(metric_label, fontsize=12) # adjust margins axright.margins(y=0) axtop.margins(x=0) plt.tight_layout() axtop.legend(bbox_to_anchor=(1, 1)) axright.legend(bbox_to_anchor=(1, 1)) if savefig is None: plt.show() else: if not isinstance(savefig, str): raise TypeError("savefig must be a string path; got {}." .format(savefig)) plt.savefig(savefig) plt.close() def analysis(self, pvalue_test="chi2", n_samples=100, print_output=True): """Binning table analysis. Statistical analysis of the binning table, computing the statistics Gini index, Information Value (IV), Jensen-Shannon divergence, and the quality score. Additionally, several statistical significance tests between consecutive bins of the contingency table are performed: a frequentist test using the Chi-square test or the Fisher's exact test, and a Bayesian A/B test using the beta distribution as a conjugate prior of the Bernoulli distribution. Parameters ---------- pvalue_test : str, optional (default="chi2") The statistical test. Supported test are "chi2" to choose the Chi-square test and "fisher" to choose the Fisher exact test. n_samples : int, optional (default=100) The number of samples to run the Bayesian A/B testing between consecutive bins to compute the probability of the event rate of bin A being greater than the event rate of bin B. print_output : bool (default=True) Whether to print analysis information. Notes ----- The Chi-square test uses `scipy.stats.chi2_contingency <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats. 
chi2_contingency.html>`_, and the Fisher exact test uses `scipy.stats.fisher_exact <https://docs.scipy.org/doc/scipy/reference/ generated/scipy.stats.fisher_exact.html>`_. """ pairs = set() for path in self._paths_x: tpairs = tuple(zip(path[:-1], path[1:])) for tp in tpairs: pairs.add(tp) for path in self._paths_y: tpairs = tuple(zip(path[:-1], path[1:])) for tp in tpairs: pairs.add(tp) pairs = sorted(pairs) # Significance tests n_bins = len(self._n_records) n_metric = n_bins - 2 n_nev = self.n_nonevent[:n_metric] n_ev = self.n_event[:n_metric] if len(n_nev) >= 2: chi2, cramer_v = chi2_cramer_v(n_nev, n_ev) else: cramer_v = 0 t_statistics = [] p_values = [] p_a_b = [] p_b_a = [] for pair in pairs: obs = np.array([n_nev[list(pair)], n_ev[list(pair)]]) t_statistic, p_value = frequentist_pvalue(obs, pvalue_test) pab, pba = bayesian_probability(obs, n_samples) p_a_b.append(pab) p_b_a.append(pba) t_statistics.append(t_statistic) p_values.append(p_value) df_tests = pd.DataFrame({ "Bin A": np.array([p[0] for p in pairs]), "Bin B": np.array([p[1] for p in pairs]), "t-statistic": t_statistics, "p-value": p_values, "P[A > B]": p_a_b, "P[B > A]": p_b_a }) if pvalue_test == "fisher": df_tests.rename(columns={"t-statistic": "odd ratio"}, inplace=True) tab = 4 if len(df_tests): df_tests_string = dataframe_to_string(df_tests, tab) else: df_tests_string = " " * tab + "None" # Quality score self._quality_score = binning_quality_score(self._iv, p_values, self._hhi_norm) report = ( "------------------------------------------------\n" "OptimalBinning: Binary Binning Table 2D Analysis\n" "------------------------------------------------\n" "\n" " General metrics" "\n\n" " Gini index {:>15.8f}\n" " IV (Jeffrey) {:>15.8f}\n" " JS (Jensen-Shannon) {:>15.8f}\n" " Hellinger {:>15.8f}\n" " Triangular {:>15.8f}\n" " KS {:>15.8f}\n" " HHI {:>15.8f}\n" " HHI (normalized) {:>15.8f}\n" " Cramer's V {:>15.8f}\n" " Quality score {:>15.8f}\n" "\n" " Significance tests\n\n{}\n" ).format(self._gini, self._iv, self._js, self._hellinger, self._triangular, self._ks, self._hhi, self._hhi_norm, cramer_v, self._quality_score, df_tests_string) if print_output: print(report) self._is_analyzed = True ``` #### File: binning/multidimensional/model_data_2d.py ```python import numpy as np from ..metrics import jeffrey from ..metrics import jensen_shannon from ..metrics import hellinger from ..metrics import triangular def _connected_rectangles(m, n, n_rectangles, monotonicity_x, monotonicity_y, rows, cols, outer_x, outer_y): d_connected_x = None d_connected_y = None if monotonicity_x is not None: connected_x = {n * i + j: n * i + j + 1 for i in range(m) for j in range(n - 1)} d_connected_x = {i: [] for i in range(n_rectangles)} if monotonicity_y is not None: connected_y = {n * i + j: n * i + j + n for i in range(m - 1) for j in range(n)} d_connected_y = {i: [] for i in range(n_rectangles)} set_pos = set(range(n_rectangles)) for i in range(n_rectangles): setr = set_pos - set().union(*(cols[r] for r in rows[i])) if monotonicity_x: hh = (connected_x[r] for r in outer_x[i] if r in connected_x) seth = set().union(*(cols[h] for h in hh)) d_connected_x[i] = seth & setr if monotonicity_y: vv = (connected_y[r] for r in outer_y[i] if r in connected_y) setv = set().union(*(cols[v] for v in vv)) d_connected_y[i] = setv & setr return d_connected_x, d_connected_y def model_data(divergence, NE, E, monotonicity_x, monotonicity_y, scale, min_bin_size, max_bin_size, min_bin_n_event, max_bin_n_event, min_bin_n_nonevent, max_bin_n_nonevent): # Compute all 
rectangles and event and non-event records m, n = E.shape n_grid = m * n fe = np.ravel(E) fne = np.ravel(NE) n_fe = [] n_fne = [] rows = [] cols = {c: [] for c in range(n_grid)} outer_x = [] outer_y = [] # Auxiliary checks is_min_bin_size = min_bin_size is not None is_max_bin_size = max_bin_size is not None is_min_bin_n_event = min_bin_n_event is not None is_max_bin_n_event = max_bin_n_event is not None is_min_bin_n_nonevent = min_bin_n_nonevent is not None is_max_bin_n_nonevent = max_bin_n_nonevent is not None # Cached rectangle shapes cached_rectangles = {} for k in range(1, m + 1): for l in range(1, n + 1): row = [n * ik + jl for ik in range(k) for jl in range(l)] cached_rectangles[(k, l)] = row n_rectangles = 0 for i in range(m): for j in range(n): w = n - j h = m - i p = i * n + j for k in range(1, h + 1): for l in range(1, w + 1): row = [p + r for r in cached_rectangles[(k, l)]] sfe = fe[row].sum() sfne = fne[row].sum() if sfe == 0 or sfne == 0: continue sn = sfe + sfne if is_min_bin_size and sn < min_bin_size: continue elif is_max_bin_size and sn > max_bin_size: continue elif is_min_bin_n_event and sfe < min_bin_n_event: continue elif is_max_bin_n_event and sfe > max_bin_n_event: continue elif is_min_bin_n_nonevent and sfne < min_bin_n_nonevent: continue elif is_max_bin_n_nonevent and sfne > max_bin_n_nonevent: continue for r in row: cols[r].append(n_rectangles) if monotonicity_x is not None: outer_x.append( [row[_i * l + (l - 1)] for _i in range(k)]) if monotonicity_y is not None: outer_y.append( [row[(k - 1) * l + _j] for _j in range(l)]) rows.append(row) n_fe.append(sfe) n_fne.append(sfne) n_rectangles += 1 n_event = np.array(n_fe) n_nonevent = np.array(n_fne) # Connected rectangles if monotonicity_x is not None or monotonicity_y is not None: d_connected_x, d_connected_y = _connected_rectangles( m, n, n_rectangles, monotonicity_x, monotonicity_y, rows, cols, outer_x, outer_y) else: d_connected_x = None d_connected_y = None # Event and non-event rate n_records = n_event + n_nonevent # Event rate and Information value event_rate = n_event / n_records p = n_event / E.sum() q = n_nonevent / NE.sum() if divergence == "iv": iv = jeffrey(p, q) elif divergence == "js": iv = jensen_shannon(p, q) elif divergence == "hellinger": iv = hellinger(p, q) elif divergence == "triangular": iv = triangular(p, q) if scale is not None: c = (iv * scale).astype(int) else: c = iv return (n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y, event_rate, n_event, n_nonevent, n_records) ``` #### File: binning/piecewise/binning_statistics.py ```python import numbers import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy import stats from ...binning.binning_statistics import _check_build_parameters from ...binning.binning_statistics import _check_is_built from ...binning.binning_statistics import bin_str_format from ...binning.binning_statistics import BinningTable from ...binning.metrics import bayesian_probability from ...binning.metrics import binning_quality_score from ...binning.metrics import continuous_binning_quality_score from ...binning.metrics import chi2_cramer_v from ...binning.metrics import frequentist_pvalue from ...binning.metrics import hhi from ...formatting import dataframe_to_string from .transformations import transform_binary_target from .transformations import transform_continuous_target class PWBinningTable(BinningTable): """Piecewise binning table to summarize optimal binning of a numerical variable with respecto a binary target. 
Parameters ---------- name : str, optional (default="") The variable name. splits : numpy.ndarray List of split points. coef : numpy.ndarray Coefficients for each bin. n_nonevent : numpy.ndarray Number of non-events. n_event : numpy.ndarray Number of events. min_x : float Mininum value of x. max_x : float Maxinum value of x. d_metrics : dict Dictionary of performance metrics. Warning ------- This class is not intended to be instantiated by the user. It is preferable to use the class returned by the property ``binning_table`` available in all optimal binning classes. """ def __init__(self, name, splits, coef, n_nonevent, n_event, min_x, max_x, d_metrics): self.name = name self.splits = splits self.coef = coef self.n_nonevent = n_nonevent self.n_event = n_event self.min_x = min_x self.max_x = max_x self.d_metrics = d_metrics self._n_records = None self._t_n_nonevent = None self._t_n_event = None self._hhi = None self._hhi_norm = None self._iv = None self._js = None self._gini = None self._quality_score = None self._ks = None self._is_built = False self._is_analyzed = False def build(self, show_digits=2, add_totals=True): """Build the binning table. Parameters ---------- show_digits : int, optional (default=2) The number of significant digits of the bin column. add_totals : bool (default=True) Whether to add a last row with totals. Returns ------- binning_table : pandas.DataFrame """ _check_build_parameters(show_digits, add_totals) n_nonevent = self.n_nonevent n_event = self.n_event self._t_n_nonevent = n_nonevent.sum() self._t_n_event = n_event.sum() n_records = n_event + n_nonevent t_n_records = self._t_n_nonevent + self._t_n_event p_records = n_records / t_n_records mask = (n_event > 0) & (n_nonevent > 0) event_rate = np.zeros(len(n_records)) event_rate[mask] = n_event[mask] / n_records[mask] self._n_records = n_records # Gini / IV / JS / hellinger / triangular / KS self._gini = self.d_metrics["Gini index"] self._iv = self.d_metrics["IV (Jeffrey)"] self._js = self.d_metrics["JS (Jensen-Shannon)"] self._hellinger = self.d_metrics["Hellinger"] self._triangular = self.d_metrics["Triangular"] self._ks = self.d_metrics["KS"] # Compute HHI self._hhi = hhi(p_records) self._hhi_norm = hhi(p_records, normalized=True) bins = np.concatenate([[-np.inf], self.splits, [np.inf]]) bin_str = bin_str_format(bins, show_digits) bin_str.extend(["Special", "Missing"]) df = pd.DataFrame({ "Bin": bin_str, "Count": n_records, "Count (%)": p_records, "Non-event": n_nonevent, "Event": n_event }) n_coefs = self.coef.shape[1] for i in range(n_coefs): if i == 0: n_nonevent_special = n_nonevent[-2] n_event_special = n_event[-2] if (n_event_special > 0) & (n_nonevent_special > 0): event_rate_special = n_event_special / n_records[-2] else: event_rate_special = 0 n_nonevent_missing = n_nonevent[-1] n_event_missing = n_event[-1] if (n_event_missing > 0) & (n_nonevent_missing > 0): event_rate_missing = n_event_missing / n_records[-1] else: event_rate_missing = 0 c_s_m = [event_rate_special, event_rate_missing] df["c{}".format(i)] = list(self.coef[:, i]) + c_s_m else: df["c{}".format(i)] = list(self.coef[:, i]) + [0, 0] if add_totals: totals = ["", t_n_records, 1, self._t_n_nonevent, self._t_n_event] totals += ["-"] * n_coefs df.loc["Totals"] = totals self._is_built = True return df def plot(self, metric="woe", add_special=True, add_missing=True, n_samples=10000, savefig=None): """Plot the binning table. Visualize the non-event and event count, and the predicted Weight of Evidence or event rate for each bin. 
Parameters ---------- metric : str, optional (default="woe") Supported metrics are "woe" to show the Weight of Evidence (WoE) measure and "event_rate" to show the event rate. add_special : bool (default=True) Whether to add the special codes bin. add_missing : bool (default=True) Whether to add the special values bin. n_samples : int (default=10000) Number of samples to be represented. savefig : str or None (default=None) Path to save the plot figure. """ if metric not in ("event_rate", "woe"): raise ValueError('Invalid value for metric. Allowed string ' 'values are "event_rate" and "woe".') if not isinstance(add_special, bool): raise TypeError("add_special must be a boolean; got {}." .format(add_special)) if not isinstance(add_missing, bool): raise TypeError("add_missing must be a boolean; got {}." .format(add_missing)) _n_nonevent = self.n_nonevent[:-2] _n_event = self.n_event[:-2] n_splits = len(self.splits) y_pos = np.empty(n_splits + 2) y_pos[0] = self.min_x y_pos[1:-1] = self.splits y_pos[-1] = self.max_x width = y_pos[1:] - y_pos[:-1] y_pos = y_pos[:-1] fig, ax1 = plt.subplots() p2 = ax1.bar(y_pos, _n_event, width, color="tab:red", align="edge") p1 = ax1.bar(y_pos, _n_nonevent, width, color="tab:blue", bottom=_n_event, align="edge") handles = [p1[0], p2[0]] labels = ['Non-event', 'Event'] ax1.set_xlabel("x", fontsize=12) ax1.set_ylabel("Bin count", fontsize=13) ax1.tick_params(axis='x', labelrotation=45) ax2 = ax1.twinx() x_samples = np.linspace(self.min_x, self.max_x, n_samples) metric_values = transform_binary_target( self.splits, x_samples, self.coef, 0, 1, self._t_n_nonevent, self._t_n_event, 0, 0, 0, 0, [], metric, 0, 0) if metric == "woe": metric_label = "WoE" elif metric == "event_rate": metric_label = "Event rate" for split in self.splits: ax2.axvline(x=split, color="darkgrey", linestyle="--") ax2.plot(x_samples, metric_values, linestyle="solid", color="black") ax2.set_ylabel(metric_label, fontsize=13) plt.title(self.name, fontsize=14) plt.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.2), ncol=2, fontsize=12) if savefig is None: plt.show() else: if not isinstance(savefig, str): raise TypeError("savefig must be a string path; got {}." .format(savefig)) plt.savefig(savefig) plt.close() def analysis(self, pvalue_test="chi2", n_samples=100, print_output=True): """Binning table analysis. Statistical analysis of the binning table, computing the statistics Gini index, Information Value (IV), Jensen-Shannon divergence, and the quality score. Additionally, several statistical significance tests between consecutive bins of the contingency table are performed: a frequentist test using the Chi-square test or the Fisher's exact test, and a Bayesian A/B test using the beta distribution as a conjugate prior of the Bernoulli distribution. Parameters ---------- pvalue_test : str, optional (default="chi2") The statistical test. Supported test are "chi2" to choose the Chi-square test and "fisher" to choose the Fisher exact test. n_samples : int, optional (default=100) The number of samples to run the Bayesian A/B testing between consecutive bins to compute the probability of the event rate of bin A being greater than the event rate of bin B. print_output : bool (default=True) Whether to print analysis information. Notes ----- The Chi-square test uses `scipy.stats.chi2_contingency <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats. 
chi2_contingency.html>`_, and the Fisher exact test uses `scipy.stats.fisher_exact <https://docs.scipy.org/doc/scipy/reference/ generated/scipy.stats.fisher_exact.html>`_. """ _check_is_built(self) if pvalue_test not in ("chi2", "fisher"): raise ValueError('Invalid value for pvalue_test. Allowed string ' 'values are "chi2" and "fisher".') if not isinstance(n_samples, numbers.Integral) or n_samples <= 0: raise ValueError("n_samples must be a positive integer; got {}." .format(n_samples)) # Significance tests n_bins = len(self._n_records) n_metric = n_bins - 2 n_nev = self.n_nonevent[:n_metric] n_ev = self.n_event[:n_metric] if len(n_nev) >= 2: chi2, cramer_v = chi2_cramer_v(n_nev, n_ev) else: cramer_v = 0 t_statistics = [] p_values = [] p_a_b = [] p_b_a = [] for i in range(n_metric-1): obs = np.array([n_nev[i:i+2] + 0.5, n_ev[i:i+2] + 0.5]) t_statistic, p_value = frequentist_pvalue(obs, pvalue_test) pab, pba = bayesian_probability(obs, n_samples) p_a_b.append(pab) p_b_a.append(pba) t_statistics.append(t_statistic) p_values.append(p_value) # Quality score self._quality_score = binning_quality_score(self._iv, p_values, self._hhi_norm) df_tests = pd.DataFrame({ "Bin A": np.arange(n_metric-1), "Bin B": np.arange(n_metric-1) + 1, "t-statistic": t_statistics, "p-value": p_values, "P[A > B]": p_a_b, "P[B > A]": p_b_a }) if pvalue_test == "fisher": df_tests.rename(columns={"t-statistic": "odd ratio"}, inplace=True) tab = 4 if len(df_tests): df_tests_string = dataframe_to_string(df_tests, tab) else: df_tests_string = " " * tab + "None" # Metrics metrics_string = "" for km, kv in self.d_metrics.items(): metrics_string += " {:<19} {:>15.8f}\n".format(km, kv) report = ( "---------------------------------------------\n" "OptimalBinning: Binary Binning Table Analysis\n" "---------------------------------------------\n" "\n" " General metrics" "\n\n" "{}" " HHI {:>15.8f}\n" " HHI (normalized) {:>15.8f}\n" " Cramer's V {:>15.8f}\n" " Quality score {:>15.8f}\n" "\n" " Significance tests\n\n{}\n" ).format(metrics_string, self._hhi, self._hhi_norm, cramer_v, self._quality_score, df_tests_string) if print_output: print(report) self._is_analyzed = True class PWContinuousBinningTable: """Piecewise binning table to summarize optimal binning of a numerical variable with respect to a continuous target. Parameters ---------- name : str, optional (default="") The variable name. splits : numpy.ndarray List of split points. coef : numpy.ndarray Coefficients for each bin. n_records : numpy.ndarray Number of records. sums : numpy.ndarray Target sums. stds : numpy.ndarray Target stds. min_target : numpy.ndarray Target mininum values. max_target : numpy.ndarray Target maxinum values. n_zeros : numpy.ndarray Number of zeros. min_x : float or None (default=None) Mininum value of x. max_x : float or None (default=None) Maxinum value of x. d_metrics : dict Dictionary of performance metrics. Warning ------- This class is not intended to be instantiated by the user. It is preferable to use the class returned by the property ``binning_table`` available in all optimal binning classes. 
""" def __init__(self, name, splits, coef, n_records, sums, stds, min_target, max_target, n_zeros, lb, ub, min_x, max_x, d_metrics): self.name = name self.splits = splits self.coef = coef self.n_records = n_records self.sums = sums self.stds = stds self.min_target = min_target self.max_target = max_target self.n_zeros = n_zeros self.lb = lb self.ub = ub self.min_x = min_x self.max_x = max_x self.d_metrics = d_metrics self._mean = None self._hhi = None self._hhi_norm = None self._is_built = False self._is_analyzed = False def build(self, show_digits=2, add_totals=True): """Build the binning table. Parameters ---------- show_digits : int, optional (default=2) The number of significant digits of the bin column. add_totals : bool (default=True) Whether to add a last row with totals. Returns ------- binning_table : pandas.DataFrame """ _check_build_parameters(show_digits, add_totals) t_n_records = np.nansum(self.n_records) t_sum = np.nansum(self.sums) p_records = self.n_records / t_n_records mask = (self.n_records > 0) self._mean = np.zeros(len(self.n_records)) self._mean[mask] = self.sums[mask] / self.n_records[mask] if self.n_records[-1] > 0: self._mean[-1] = 0 # Compute HHI self._hhi = hhi(p_records) self._hhi_norm = hhi(p_records, normalized=True) bins = np.concatenate([[-np.inf], self.splits, [np.inf]]) bin_str = bin_str_format(bins, show_digits) bin_str.extend(["Special", "Missing"]) df = pd.DataFrame({ "Bin": bin_str, "Count": self.n_records, "Count (%)": p_records, "Sum": self.sums, "Std": self.stds, "Min": self.min_target, "Max": self.max_target, "Zeros count": self.n_zeros, }) n_coefs = self.coef.shape[1] for i in range(n_coefs): if i == 0: n_records_special = self.n_records[-2] if n_records_special > 0: meam_special = self.sums[-2] / self.n_records[-2] else: meam_special = 0 n_records_missing = self.n_records[-1] if n_records_missing > 0: mean_missing = self.sums[-1] / self.n_records[-1] else: mean_missing = 0 c_s_m = [meam_special, mean_missing] df["c{}".format(i)] = list(self.coef[:, i]) + c_s_m else: df["c{}".format(i)] = list(self.coef[:, i]) + [0, 0] if add_totals: t_min = np.nanmin(self.min_target) t_max = np.nanmax(self.max_target) t_n_zeros = self.n_zeros.sum() totals = ["", t_n_records, 1, t_sum, "", t_min, t_max, t_n_zeros] totals += ["-"] * n_coefs df.loc["Totals"] = totals self._is_built = True return df def plot(self, add_special=True, add_missing=True, n_samples=10000, savefig=None): """Plot the binning table. Visualize records count and the prediction for each bin. Parameters ---------- add_special : bool (default=True) Whether to add the special codes bin. add_missing : bool (default=True) Whether to add the special values bin. n_samples : int (default=10000) Number of samples to be represented. savefig : str or None (default=None) Path to save the plot figure. 
""" _n_records = self.n_records[:-2] n_splits = len(self.splits) y_pos = np.empty(n_splits + 2) y_pos[0] = self.min_x y_pos[1:-1] = self.splits y_pos[-1] = self.max_x width = y_pos[1:] - y_pos[:-1] y_pos = y_pos[:-1] fig, ax1 = plt.subplots() p1 = ax1.bar(y_pos, _n_records, width, color="tab:blue", align="edge") handles = [p1[0]] labels = ['Count'] ax1.set_xlabel("x", fontsize=12) ax1.set_ylabel("Bin count", fontsize=13) ax1.tick_params(axis='x', labelrotation=45) ax2 = ax1.twinx() x_samples = np.linspace(self.min_x, self.max_x, n_samples) metric_values = transform_continuous_target( self.splits, x_samples, self.coef, self.lb, self.ub, 0, 0, 0, 0, [], 0, 0) metric_label = "Mean" for split in self.splits: ax2.axvline(x=split, color="darkgrey", linestyle="--") ax2.plot(x_samples, metric_values, linestyle="solid", color="black") ax2.set_ylabel(metric_label, fontsize=13) plt.title(self.name, fontsize=14) plt.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.2), ncol=2, fontsize=12) if savefig is None: plt.show() else: if not isinstance(savefig, str): raise TypeError("savefig must be a string path; got {}." .format(savefig)) plt.savefig(savefig) plt.close() def analysis(self, print_output=True): """Binning table analysis. Statistical analysis of the binning table, computing the Information Value (IV) and Herfindahl-Hirschman Index (HHI). Parameters ---------- print_output : bool (default=True) Whether to print analysis information. """ _check_is_built(self) # Significance tests n_bins = len(self.n_records) n_metric = n_bins - 2 n_records = self.n_records[:n_metric] mean = self._mean[:n_metric] std = self.stds[:n_metric] t_statistics = [] p_values = [] for i in range(n_metric-1): u, u2 = mean[i], mean[i+1] s, s2 = std[i], std[i+1] r, r2 = n_records[i], n_records[i+1] t_statistic, p_value = stats.ttest_ind_from_stats( u, s, r, u2, s2, r2, False) t_statistics.append(t_statistic) p_values.append(p_value) df_tests = pd.DataFrame({ "Bin A": np.arange(n_metric-1), "Bin B": np.arange(n_metric-1) + 1, "t-statistic": t_statistics, "p-value": p_values }) tab = 4 if len(df_tests): df_tests_string = dataframe_to_string(df_tests, tab) else: df_tests_string = " " * tab + "None" # Quality score self._quality_score = continuous_binning_quality_score( p_values, self._hhi_norm) # Metrics metrics_string = "" for km, kv in self.d_metrics.items(): metrics_string += " {:<21} {:>21.8f}\n".format(km, kv) report = ( "-------------------------------------------------\n" "OptimalBinning: Continuous Binning Table Analysis\n" "-------------------------------------------------\n" "\n" " General metrics" "\n\n" "{}" " HHI {:>21.8f}\n" " HHI (normalized) {:>21.8f}\n" " Quality score {:>21.8f}\n" "\n" " Significance tests\n\n{}\n" ).format(metrics_string, self._hhi, self._hhi_norm, self._quality_score, df_tests_string) if print_output: print(report) ``` #### File: optbinning/optbinning/information.py ```python import numpy as np from sklearn.base import BaseEstimator from ._version import __version__ try: from localsolver import LSStatistics LOCALSOLVER_AVAILABLE = True except ImportError: LOCALSOLVER_AVAILABLE = False def print_header(): header = ( "optbinning (Version {})\n" "Copyright (c) 2019-2022 <NAME>, Apache License 2.0" "\n".format(__version__)) print(header) def print_optional_parameters(dict_default_options, dict_user_options): option_format = " {:<24} {:>15} * {}\n" str_options = " Begin options\n" for key, value in dict_default_options.items(): user_value = dict_user_options[key] if 
(isinstance(user_value, (list, np.ndarray, dict)) or value != user_value): user_flag = "U" else: user_flag = "d" if user_value is None: user_value = "no" elif isinstance(user_value, (list, np.ndarray, dict)): user_value = "yes" elif isinstance(user_value, BaseEstimator): user_value = "yes" str_options += option_format.format(key, str(user_value), user_flag) str_options += " End options\n" print(str_options) def solver_statistics(solver_type, solver): time_optimizer = None d_solver = {} if solver_type == "cp": d_solver["n_booleans"] = solver.NumBooleans() d_solver["n_branches"] = solver.NumBranches() d_solver["n_conflicts"] = solver.NumConflicts() d_solver["objective"] = int(solver.ObjectiveValue()) d_solver["best_objective_bound"] = int(solver.BestObjectiveBound()) time_optimizer = solver.WallTime() elif solver_type == "mip": d_solver["n_constraints"] = solver.NumConstraints() d_solver["n_variables"] = solver.NumVariables() d_solver["objective"] = solver.Objective().Value() d_solver["best_bound"] = solver.Objective().BestBound() elif solver_type == "ls": if not LOCALSOLVER_AVAILABLE: raise ImportError('Cannot import localsolver. Install LocalSolver ' 'or choose another solver, options are "cp" and ' '"mip".') d_solver["n_iterations"] = LSStatistics.get_nb_iterations( solver.statistics) elif solver_type == "lp": d_solver["n_variables"] = solver.n_variables d_solver["n_constraints"] = solver.n_constraints d_solver["n_iterations"] = solver.n_iterations d_solver["objective"] = solver.objective return d_solver, time_optimizer def print_solver_statistics(solver_type, d_solver): if solver_type == "cp": solver_stats = ( " Solver statistics\n" " Type {:>10}\n" " Number of booleans {:>10}\n" " Number of branches {:>10}\n" " Number of conflicts {:>10}\n" " Objective value {:>10}\n" " Best objective bound {:>10}\n" ).format(solver_type, *d_solver.values()) elif solver_type == "mip": solver_stats = ( " Solver statistics\n" " Type {:>10}\n" " Number of variables {:>10}\n" " Number of constraints {:>10}\n" " Objective value {:>10.4f}\n" " Best objective bound {:>10.4f}\n" ).format(solver_type, *d_solver.values()) elif solver_type == "ls": solver_stats = ( " Solver statistics\n" " Type {:>10}\n" " Number of iterations {:>10}\n" ).format(solver_type, *d_solver.values()) elif solver_type == "lp": solver_stats = ( " Solver statistics\n" " Type {:>10}\n" " Number of variables {:>10}\n" " Number of constraints {:>10}\n" " Number of iterations {:>10}\n" " Objective value {:>10.4f}\n" ).format(solver_type, *d_solver.values()) print(solver_stats) ``` #### File: optbinning/metrics/regression.py ```python import numpy as np from sklearn.metrics import explained_variance_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import median_absolute_error from sklearn.metrics import r2_score def mean_absolute_percentage_error(y_true, y_pred): """Compute the mean absolute percentage error (MAPE). Parameters ---------- y_true : array-like, shape (n_samples,) Ground truth (correct) target values. y_pred : array-like, shape (n_samples,) Estimated target values. Returns ------- mape : float """ return np.abs((y_true - y_pred) / y_true).mean() def median_absolute_percentage_error(y_true, y_pred): """Compute the median absolute percentage error (MdAPE). Parameters ---------- y_true : array-like, shape (n_samples,) Ground truth (correct) target values. y_pred : array-like, shape (n_samples,) Estimated target values. 
Returns ------- mdape : float """ return np.median(np.abs((y_true - y_pred) / y_true)) def mean_percentage_error(y_true, y_pred): """Compute the mean percentage error (MPE). Parameters ---------- y_true : array-like, shape (n_samples,) Ground truth (correct) target values. y_pred : array-like, shape (n_samples,) Estimated target values. Returns ------- mpe : float """ return ((y_true - y_pred) / y_true).mean() def symmetric_mean_absolute_percentage_error(y_true, y_pred): """Compute the symmetric mean absolute percentage error (SMAPE). Parameters ---------- y_true : array-like, shape (n_samples,) Ground truth (correct) target values. y_pred : array-like, shape (n_samples,) Estimated target values. Returns ------- smape : float """ e = np.abs(y_true - y_pred) return (e / (np.abs(y_true) + np.abs(y_pred))).mean() def symmetric_median_absolute_percentage_error(y_true, y_pred): """Compute the symmetric median absolute percentage error (SMdAPE). Parameters ---------- y_true : array-like, shape (n_samples,) Ground truth (correct) target values. y_pred : array-like, shape (n_samples,) Estimated target values. Returns ------- smdape : float """ e = np.abs(y_true - y_pred) return np.median(e / (np.abs(y_true) + np.abs(y_pred))) def regression_metrics(y_true, y_pred): """Compute regression metrics. Parameters ---------- y_true : array-like, shape (n_samples,) Ground truth (correct) target values. y_pred : array-like, shape (n_samples,) Estimated target values. Returns ------- metrics : dict Dictionary of metrics. """ # Explained variance variance = explained_variance_score(y_true, y_pred) # Mean absolute error mae = mean_absolute_error(y_true, y_pred) # Mean squared error mse = mean_squared_error(y_true, y_pred) # Median absolute error median_ae = median_absolute_error(y_true, y_pred) # R^2 score r2 = r2_score(y_true, y_pred) # Mean absolute percentage error mape = mean_absolute_percentage_error(y_true, y_pred) # Mean percentage error mpe = mean_percentage_error(y_true, y_pred) # Symmetric mean absolute percentage error smape = symmetric_mean_absolute_percentage_error(y_true, y_pred) # Median absolute percentage error mdape = median_absolute_percentage_error(y_true, y_pred) # Symmetric meadian absolute percentage error smdape = symmetric_median_absolute_percentage_error(y_true, y_pred) d_metrics = { "Mean absolute error": mae, "Mean squared error": mse, "Median absolute error": median_ae, "Explained variance": variance, "R^2": r2, "MPE": mpe, "MAPE": mape, "SMAPE": smape, "MdAPE": mdape, "SMdAPE": smdape } return d_metrics ``` #### File: scorecard/counterfactual/counterfactual.py ```python import numbers import time import numpy as np import pandas as pd from ...information import solver_statistics from ...logging import Logger from ..scorecard import Scorecard from .base import BaseCounterfactual from .counterfactual_information import print_counterfactual_information from .mip import CFMIP from .model_data import model_data from .multi_mip import MCFMIP from .problem_data import problem_data logger = Logger(__name__).logger OBJECTIVES = ("proximity", "closeness") HARD_CONSTRAINTS = { "binary": ("diversity_features", "diversity_values"), "probability": ("diversity_features", "diversity_values", "min_outcome", "max_outcome"), "continuous": ("diversity_features", "diversity_values", "min_outcome", "max_outcome") } SOFT_CONSTRAINTS = { "binary": ("diversity_features", "diversity_values"), "probability": ("diversity_features", "diversity_values", "diff_outcome"), "continuous": ("diversity_features", 
"diversity_values", "diff_outcome") } def _check_parameters(scorecard, special_missing, n_jobs, verbose): # Check scorecard if not isinstance(scorecard, Scorecard): raise TypeError("scorecard must be a Scorecard instance.") scorecard._check_is_fitted() if not isinstance(special_missing, bool): raise TypeError("special_missing must be a boolean; got {}." .format(special_missing)) if not isinstance(n_jobs, numbers.Integral) or n_jobs <= 0: raise ValueError("n_jobs must be a positive integer; got {}." .format(n_jobs)) if not isinstance(verbose, bool): raise TypeError("verbose must be a boolean; got {}.".format(verbose)) def _check_generate_params(query, y, outcome_type, n_cf, method, objectives, max_changes, actionable_features, hard_constraints, soft_constraints, variable_names, target_dtype): # Check query if not isinstance(query, (dict, pd.DataFrame)): raise TypeError("query must be a dict or a pandas.DataFrame.") # Check target if not isinstance(y, numbers.Number): raise TypeError("y must be numeric.") # Check target and outcome type if target_dtype == "binary": if outcome_type not in ("binary", "probability"): raise ValueError("outcome_type must either binary or probability " "if target_dtype=binary; got {}." .format(outcome_type)) elif outcome_type == "binary" and y not in [0, 1]: raise ValueError("y must be either 0 or 1 if outcome_type=binary; " "got {}.".format(y)) elif outcome_type == "probability" and not 0 <= y <= 1: raise ValueError("y must be in [0, 1] if outcome_type=probability " "; got {}.".format(y)) elif target_dtype == "continuous": if outcome_type != "continuous": raise ValueError("outcome_type must be continuous if " "target_dtype=continuous; got {}." .format(outcome_type)) # Check number of counterfactuals if not isinstance(n_cf, numbers.Integral) or n_cf <= 0: raise ValueError("n_cf must be a positive integer; got {}." .format(n_cf)) if max_changes is not None: if not isinstance(max_changes, numbers.Integral) or max_changes <= 0: raise ValueError("max_changes must be a positive integer; got {}." .format(max_changes)) # Check actionable features if actionable_features is not None: if not isinstance(actionable_features, (list, np.ndarray)): raise TypeError("actionable_features must be either a list or " "a numpy.ndarray.") for av in actionable_features: if av not in variable_names: raise ValueError("actionable feature {} is not in {}." .format(av, variable_names)) # Check method and constraints _check_objectives_method_constraints( method, objectives, hard_constraints, soft_constraints, outcome_type) def _check_objectives_method_constraints(method, objectives, hard_constraints, soft_constraints, outcome_type): # Check types if method not in ("weighted", "hierarchical"): raise ValueError('Invalid value for method. Allowed string values are ' '"weighted" and "hierarchical".') if objectives is not None: if not isinstance(objectives, dict): raise TypeError("objectives must be a dict.") if not len(objectives): raise ValueError("objectives cannot be empty.") for obj, value in objectives.items(): if obj not in OBJECTIVES: raise ValueError("objective names must be in {}; got {}." .format(OBJECTIVES, obj)) elif not isinstance(value, numbers.Number) or value <= 0: raise ValueError("objective values must be positive; got {}." 
.format({obj, value})) if hard_constraints is not None: if not isinstance(hard_constraints, (list, tuple, np.ndarray)): raise TypeError("hard_constraints must a list, tuple or " "numpy.ndarray.") if len(hard_constraints) != len(set(hard_constraints)): raise ValueError("hard_constraints cannot be repeated.") for hc in hard_constraints: if hc not in HARD_CONSTRAINTS[outcome_type]: raise ValueError( "Invalid hard constraint for outcome_type={}. Allowed " "strings values are {}.".format( outcome_type, HARD_CONSTRAINTS[outcome_type])) if soft_constraints is not None: if not isinstance(soft_constraints, dict): raise TypeError("soft_constraints must be a dict.") for sc, value in soft_constraints.items(): if sc not in SOFT_CONSTRAINTS[outcome_type]: raise ValueError( "Invalid soft constraint for outcome_type={}. Allowed " "string values are {}.".format( outcome_type, SOFT_CONSTRAINTS[outcome_type])) elif not isinstance(value, numbers.Number) or value <= 0: raise ValueError("soft constraint values must be positive; " "got {}.".format({sc, value})) # Check combination of hard and soft constraints for outcome type # probability and continuous. Al least one of: # [min_outcome, max_outcome, diff_outcome] # must be included. if outcome_type in ("probability", "continuous"): if hard_constraints is None and soft_constraints is None: raise ValueError("If outcome_type is either probability or " "continuous, at least one hard constraint or" "soft constraint must be provided.") # check number of suitable constraints _scons = ("min_outcome", "max_outcome", "diff_outcome") _hard = list(hard_constraints) if hard_constraints is not None else [] _soft = list(soft_constraints) if soft_constraints is not None else [] _hard_soft = np.array(_hard + _soft) _selected = np.array([c in _scons for c in _hard_soft]) n_selected = np.count_nonzero(_selected) if n_selected == 0: raise ValueError('If outcome_type={}, at least one of the ' 'hard_constraints "min_outcome", "max_outcome" ' 'or the soft_constraint "diff_outcome" must be ' 'selected.'.format(outcome_type)) class Counterfactual(BaseCounterfactual): """Optimal counterfactual explanations given a scorecard model. Parameters ---------- scorecard : object A ``Scorecard`` instance. special_missing : bool (default=False) Whether the special and missing bin are considered as valid counterfactual values. n_jobs : int, optional (default=1) Number of cores to run the optimization solver. verbose : bool (default=False) Enable verbose output. """ def __init__(self, scorecard, special_missing=False, n_jobs=1, verbose=False): self.scorecard = scorecard self.special_missing = special_missing self.n_jobs = n_jobs self.verbose = verbose # auxiliary self._cfs = None # info self._optimizer = None self._status = None # timing self._time_fit = None self._time_solver = None self._time_postprocessing = None # flags self._is_fitted = False self._is_generated = False def fit(self, X): """Fit counterfactual. Compute problem data to generate counterfactual explanations. Parameters ---------- X : pandas.DataFrame (n_samples, n_features) Training vector, where n_samples is the number of samples. Returns ------- self : Counterfactual Fitted counterfactual. 
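        Examples
        --------
        A minimal end-to-end sketch, assuming ``Counterfactual`` has been
        imported from this module, ``scorecard`` is an already fitted
        ``Scorecard`` instance and ``X`` is the pandas.DataFrame used to
        train it (both names are illustrative assumptions):

        >>> cf = Counterfactual(scorecard=scorecard)
        >>> cf.fit(X)
        >>> cf.generate(query=X.iloc[:1], y=1, outcome_type="binary", n_cf=1)
        >>> cf.display(show_only_changes=True, show_outcome=True)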
""" time_init = time.perf_counter() if self.verbose: logger.info("Counterfactual fit started.") logger.info("Options: check parameters.") _check_parameters(**self.get_params(deep=False)) if not isinstance(X, pd.DataFrame): raise TypeError("X must be a pandas.DataFrame.") # Scorecard selected variables self._variable_names = self.scorecard.binning_process_.get_support( names=True) for v in self._variable_names: if v not in X.columns: raise ValueError("Variable {} not in X. X must include {}." .format(v, self._variable_names)) if self.verbose: logger.info("Compute optimization problem data.") # Problem data intercept, coef, min_p, max_p, wrange, F, mu = problem_data( self.scorecard, X[self._variable_names]) self._intercept = intercept self._coef = coef self._min_p = min_p self._max_p = max_p self._wrange = wrange self._F = F self._mu = mu self._time_fit = time.perf_counter() - time_init if self.verbose: logger.info("Counterfactual fit terminated. Time: {:.4f}s" .format(self._time_fit)) self._is_fitted = True self._is_generated = False return self def information(self, print_level=1): """Print overview information about the options settings and statistics. Parameters ---------- print_level : int (default=1) Level of details. """ self._check_is_generated() if not isinstance(print_level, numbers.Integral) or print_level < 0: raise ValueError("print_level must be an integer >= 0; got {}." .format(print_level)) if self._optimizer is not None: solver, _ = solver_statistics("mip", self._optimizer.solver_) objectives = self._optimizer._objectives time_solver = self._time_solver else: solver = None objectives = None time_solver = 0 time_total = self._time_fit + time_solver + self._time_postprocessing dict_user_options = self.get_params(deep=False) print_counterfactual_information( print_level, self._status, solver, objectives, time_total, self._time_fit, time_solver, self._time_postprocessing, dict_user_options) def generate(self, query, y, outcome_type, n_cf, method="weighted", objectives=None, max_changes=None, actionable_features=None, hard_constraints=None, soft_constraints=None, priority_tol=0.1, time_limit=10): """Generate counterfactual explanations given objectives and constraints. Parameters ---------- query : dict or pandas.DataFrame Input data points for which a single or multiple counterfactual explanations are to be generated. y : int or float Desired outcome. outcome_type : str Desired outcome type. Supported outcome types are "binary", "probability" and "continuous". n_cf : int Number of counterfactuals to be generated. method : str (default="weighted") Multi-objective optimization method. Supported methods are "weighted" and "hierarchical". objectives : dict or None (default=None) Objectives with their corresponding weights or priorities, depending on the method. max_changes : int or None (default=None) Maximum number of features to be changed. If None, the maximum number of changes is half of the number of features. actionable_features : array-like or None (default=None) List of actionable features. If None. all features are suitable to be changed. hard_constraints : array-like or None (default=None) Constraint to be enforced when solving the underlying optimization problem. soft_constraints : dict or None (default=None) Constraints to be moved to the objective function as a penalization term. priority_tol : float, optional (default=0.1) Relative tolerance when solving the multi-objective optimization problem with ``method="hierarchical"``. 
time_limit : int (default=10) The maximum time in seconds to run the optimization solver. Returns ------- self : Counterfactual Generated counterfactuals. """ time_init = time.perf_counter() self._check_is_fitted() if self.verbose: logger.info("Counterfactual generation started.") logger.info("Options: check parameters.") # Check parameters _check_generate_params( query, y, outcome_type, n_cf, method, objectives, max_changes, actionable_features, hard_constraints, soft_constraints, self._variable_names, self.scorecard._target_dtype) # Check priority tolerance if (not isinstance(priority_tol, numbers.Number) or not 0 <= priority_tol <= 1): raise ValueError("priority_tol must be in [0, 1]; got {}." .format(priority_tol)) # Check time limit if not isinstance(time_limit, numbers.Number) or time_limit <= 0: raise ValueError("time_limit must be a positive value in seconds; " "got {}.".format(time_limit)) # Transform query using scorecard binning process x, query = self._transform_query(query) if self.verbose: logger.info("Options: check objectives and constraints.") # Set default objectives if objectives is None: if method == "weighted": _objectives = dict(zip(OBJECTIVES, (1, 1))) else: _objectives = dict(zip(OBJECTIVES, (2, 1))) else: _objectives = objectives # Set max changes if max_changes is None: _max_changes = len(self._variable_names) // 2 else: _max_changes = max_changes # Clean constraints given the number of counterfactuals _hard_constraints, _soft_constraints = self._prepare_constraints( outcome_type, n_cf, hard_constraints, soft_constraints) # Indices of non actionable features non_actionable = self._non_actionable_indices(actionable_features) # Optimization problem if self.verbose: logger.info("Optimizer started.") time_solver = time.perf_counter() if n_cf == 1: optimizer = CFMIP(method, _objectives, _max_changes, non_actionable, _hard_constraints, _soft_constraints, priority_tol, self.n_jobs, time_limit) else: optimizer = MCFMIP(n_cf, method, _objectives, _max_changes, non_actionable, _hard_constraints, _soft_constraints, priority_tol, self.n_jobs, time_limit) # Problem data. Indices is required to construct counterfactual if self.verbose: logger.info("Optimizer: build model...") nbins, metric, indices = model_data( self.scorecard, x, self.special_missing) optimizer.build_model(self.scorecard, x, y, outcome_type, self._intercept, self._coef, self._min_p, self._max_p, self._wrange, self._F, self._mu, nbins, metric) # Optimization if self.verbose: logger.info("Optimizer: solve...") status, solution = optimizer.solve() self._status = status self._optimizer = optimizer self._time_solver = time.perf_counter() - time_solver if self.verbose: logger.info("Optimizer terminated. 
Time: {:.4f}s" .format(self._time_solver)) # Post-processing if self.verbose: logger.info("Post-processing started.") time_postprocessing = time.perf_counter() if status in ("OPTIMAL", "FEASIBLE"): cfs = [] sc = self.scorecard.table() if n_cf == 1: new_indices, new_query, score = self._get_counterfactual( query, sc, x, nbins, metric, indices, solution) cfs.append({"outcome_type": outcome_type, "query": new_query, "score": score, "features": new_indices.keys()}) else: for k in range(n_cf): new_indices, new_query, score = self._get_counterfactual( query, sc, x, nbins, metric, indices, solution[k]) cfs.append({"outcome_type": outcome_type, "query": new_query, "score": score, "features": new_indices.keys()}) else: cfs = None self._cfs = cfs self._time_postprocessing = time.perf_counter() - time_postprocessing if self.verbose: logger.info("Post-processing terminated. Time: {:.4f}s" .format(self._time_postprocessing)) self._time_total = time.perf_counter() - time_init if self.verbose: logger.info("Counterfactual generation terminated. Status: {}. " "Time: {:.4f}s".format(self._status, self._time_total)) # Completed successfully self._is_generated = True return self def display(self, show_only_changes=False, show_outcome=False): """Display the generatedcounterfactual explanations. Parameters ---------- show_only_changes : boolean (default=False) Whether to show only changes on feature values. show_outcome : boolean (default=False) Whether to add a column with the scorecard outcome. If ``outcome_type`` is "binary" or "probability", the estimated probability of the counterfactual is added. Returns ------- counterfactuals : pandas.DataFrame Counterfactual explanations. """ self._check_is_generated() self._check_counterfactual_is_found() if not isinstance(show_only_changes, bool): raise TypeError("show_only_changes must be a boolean; got {}." .format(show_only_changes)) if not isinstance(show_outcome, bool): raise TypeError("show_outcome must be a boolean; got {}." .format(show_outcome)) cf_queries = [] for cf in self._cfs: cf_query = cf["query"].copy() if show_only_changes: cf_features = cf["features"] for v in cf_query.columns: if v not in cf_features: cf_query[v] = "-" if show_outcome: outcome_type = cf["outcome_type"] if outcome_type == "continuous": cf_query["outcome"] = cf["score"] else: cf_score = cf["score"] cf_query["outcome"] = 1.0 / (1.0 + np.exp(-cf_score)) cf_queries.append(cf_query) return pd.concat(cf_queries) def _get_counterfactual(self, query, sc, x, nbins, metric, indices, solution): new_indices = {} score = 0 for i, v in enumerate(self._variable_names): new_index = np.array(indices[i])[solution[i]] if len(new_index): new_indices[v] = new_index new_metric = x[i] + np.sum( [(metric[i][j] - x[i]) * solution[i][j] for j in range(nbins[i])]) score += self._coef[i] * new_metric score += self._intercept new_query = query.copy() for v, index in new_indices.items(): new_query[v] = sc[sc["Variable"] == v]["Bin"][index].values return new_indices, new_query, score def _transform_query(self, query): if isinstance(query, dict): query = pd.DataFrame.from_dict(query, orient="index").T x = self.scorecard.binning_process_.transform( query[self._variable_names]).values.ravel() return x, query def _prepare_constraints(self, outcome_type, n_cf, hard_constraints, soft_constraints): # Remove diversity_features and diversity_values if n_cf == 1. 
diversity_constraints = ["diversity_features", "diversity_values"] if hard_constraints is None: hard_cons = {} elif n_cf == 1: hard_cons = [c for c in hard_constraints if c not in diversity_constraints] else: hard_cons = hard_constraints if soft_constraints is None: soft_cons = {} elif n_cf == 1: soft_cons = {c: v for c, v in soft_constraints.items() if c not in diversity_constraints} else: soft_cons = soft_constraints return hard_cons, soft_cons def _non_actionable_indices(self, actionable_features): non_actionable = [] if actionable_features is not None: for i, av in enumerate(self._variable_names): if av not in actionable_features: non_actionable.append(i) return non_actionable @property def status(self): """The status of the underlying optimization solver. Returns ------- status : str """ self._check_is_generated() return self._status ``` #### File: scorecard/counterfactual/problem_data.py ```python import numpy as np def problem_data(scorecard, X): s_vars = X.columns n_vars = X.shape[1] # Scorecard table sc = scorecard.table(style="detailed") if scorecard._target_dtype == "binary": sc["Points"] = sc["WoE"] * sc["Coefficient"] else: sc["Points"] = sc["Mean"] * sc["Coefficient"] # Linear model coefficients intercept = float(scorecard.estimator_.intercept_) coef = scorecard.estimator_.coef_.ravel() # Big-M parameters (min, max) points. # Proximity weights. Inverse value range for each feature min_p = 0 max_p = 0 wrange = np.empty(n_vars) for i, v in enumerate(s_vars): v_points = sc[sc["Variable"] == v]["Points"] _min = np.min(v_points) _max = np.max(v_points) min_p += _min max_p += _max wrange[i] = 1.0 / (_max - _min) min_p += intercept max_p += intercept # Mahalanobis distance Xt = scorecard.binning_process_.transform(X).values F = np.linalg.cholesky(np.linalg.inv(np.cov(Xt.T))) mu = Xt.mean(axis=0) return intercept, coef, min_p, max_p, wrange, F, mu ``` #### File: optbinning/tests/test_binning_process_sketch.py ```python import pandas as pd from pytest import approx, raises from optbinning import BinningProcessSketch from optbinning import OptimalBinningSketch from optbinning.exceptions import NotSolvedError from optbinning.exceptions import NotDataAddedError from sklearn.datasets import load_breast_cancer data = load_breast_cancer() variable_names = data.feature_names df = pd.DataFrame(data.data, columns=variable_names) y = data.target def test_params(): with raises(TypeError): BinningProcessSketch(variable_names=1) with raises(ValueError): BinningProcessSketch(variable_names=[], max_n_prebins=-2) with raises(ValueError): BinningProcessSketch(variable_names=[], min_n_bins=-2) with raises(ValueError): BinningProcessSketch(variable_names=[], max_n_bins=-2.2) with raises(ValueError): BinningProcessSketch(variable_names=[], min_n_bins=3, max_n_bins=2) with raises(ValueError): BinningProcessSketch(variable_names=[], min_bin_size=0.6) with raises(ValueError): BinningProcessSketch(variable_names=[], max_bin_size=-0.6) with raises(ValueError): BinningProcessSketch(variable_names=[], min_bin_size=0.5, max_bin_size=0.3) with raises(ValueError): BinningProcessSketch(variable_names=[], max_pvalue=1.1) with raises(ValueError): BinningProcessSketch(variable_names=[], max_pvalue_policy="new_policy") with raises(TypeError): BinningProcessSketch(variable_names=[], selection_criteria=[]) with raises(TypeError): BinningProcessSketch(variable_names=[], categorical_variables={}) with raises(TypeError): BinningProcessSketch(variable_names=[], categorical_variables=[1, 2]) with raises(TypeError): 
BinningProcessSketch(variable_names=[], special_codes={1, 2, 3}) with raises(ValueError): BinningProcessSketch(variable_names=[], split_digits=9) with raises(TypeError): BinningProcessSketch(variable_names=[], binning_fit_params=[1, 2]) with raises(TypeError): BinningProcessSketch(variable_names=[], binning_transform_params=[1, 2]) with raises(TypeError): BinningProcessSketch(variable_names=[], verbose=1) def test_default(): bpsketch = BinningProcessSketch(variable_names) bpsketch.add(df, y) bpsketch.solve() optb = bpsketch.get_binned_variable("mean radius") assert optb.status == "OPTIMAL" optb.binning_table.build() assert optb.binning_table.iv == approx(5.04392547, rel=1e-2) def test_default_merge(): bpsketch_1 = BinningProcessSketch(variable_names) bpsketch_2 = BinningProcessSketch(variable_names) df_1, y_1 = df.iloc[:200, :], y[:200] df_2, y_2 = df.iloc[200:, :], y[200:] bpsketch_1.add(df_1, y_1) bpsketch_2.add(df_2, y_2) bpsketch_1.merge(bpsketch_2) bpsketch_1.solve() optb = bpsketch_1.get_binned_variable("mean radius") assert optb.status == "OPTIMAL" optb.binning_table.build() assert optb.binning_table.iv == approx(5.04392547, rel=1e-2) def test_default_tdigest_merge(): binning_fit_params = {v: {"sketch": "t-digest"} for v in variable_names} bpsketch_1 = BinningProcessSketch(variable_names, binning_fit_params=binning_fit_params) bpsketch_2 = BinningProcessSketch(variable_names, binning_fit_params=binning_fit_params) df_1, y_1 = df.iloc[:200, :], y[:200] df_2, y_2 = df.iloc[200:, :], y[200:] bpsketch_1.add(df_1, y_1) bpsketch_2.add(df_2, y_2) bpsketch_1.merge(bpsketch_2) bpsketch_1.solve() optb = bpsketch_1.get_binned_variable("mean radius") assert optb.status == "OPTIMAL" optb.binning_table.build() assert optb.binning_table.iv == approx(5.04392547, rel=1e-2) def test_default_transform(): bpsketch = BinningProcessSketch(variable_names) bpsketch.add(df, y) with raises(NotSolvedError): bpsketch.transform(df, metric="woe") bpsketch.solve() with raises(TypeError): X_transform = bpsketch.transform(df.values, metric="woe") with raises(ValueError): X_transform = bpsketch.transform(df, metric="new_woe") X_transform = bpsketch.transform(df) optb = OptimalBinningSketch() x = df["mean radius"] optb.add(x, y) optb.solve() assert optb.transform(x, metric="woe") == approx( X_transform["mean radius"], rel=1e-6) def test_information(): bpsketch = BinningProcessSketch(variable_names) with raises(NotDataAddedError): bpsketch.solve() bpsketch.add(df, y) with raises(NotSolvedError): bpsketch.information() bpsketch.solve() with raises(ValueError): bpsketch.information(print_level=-1) bpsketch.information(print_level=0) bpsketch.information(print_level=1) bpsketch.information(print_level=2) ``` #### File: optbinning/tests/test_binning_sketch.py ```python import numpy as np import pandas as pd from pytest import approx, raises from optbinning import OptimalBinningSketch from optbinning.exceptions import NotSolvedError from sklearn.datasets import load_breast_cancer data = load_breast_cancer() df = pd.DataFrame(data.data, columns=data.feature_names) variable = "mean radius" x = df[variable].values y = data.target def test_params(): with raises(TypeError): OptimalBinningSketch(name=1) with raises(ValueError): OptimalBinningSketch(dtype="nominal") with raises(ValueError): OptimalBinningSketch(sketch="new_sketch") with raises(ValueError): OptimalBinningSketch(eps=-1e-2) with raises(ValueError): OptimalBinningSketch(K=-3) with raises(ValueError): OptimalBinningSketch(solver="new_solver") with raises(ValueError): 
OptimalBinningSketch(divergence="new_divergence") with raises(ValueError): OptimalBinningSketch(max_n_prebins=-2) with raises(ValueError): OptimalBinningSketch(min_n_bins=-2) with raises(ValueError): OptimalBinningSketch(max_n_bins=-2.2) with raises(ValueError): OptimalBinningSketch(min_n_bins=3, max_n_bins=2) with raises(ValueError): OptimalBinningSketch(min_bin_size=0.6) with raises(ValueError): OptimalBinningSketch(max_bin_size=-0.6) with raises(ValueError): OptimalBinningSketch(min_bin_size=0.5, max_bin_size=0.3) with raises(ValueError): OptimalBinningSketch(min_bin_n_nonevent=-2) with raises(ValueError): OptimalBinningSketch(max_bin_n_nonevent=-2) with raises(ValueError): OptimalBinningSketch(min_bin_n_nonevent=3, max_bin_n_nonevent=2) with raises(ValueError): OptimalBinningSketch(min_bin_n_event=-2) with raises(ValueError): OptimalBinningSketch(max_bin_n_event=-2) with raises(ValueError): OptimalBinningSketch(min_bin_n_event=3, max_bin_n_event=2) with raises(ValueError): OptimalBinningSketch(monotonic_trend="new_trend") with raises(ValueError): OptimalBinningSketch(min_event_rate_diff=1.1) with raises(ValueError): OptimalBinningSketch(max_pvalue=1.1) with raises(ValueError): OptimalBinningSketch(max_pvalue_policy="new_policy") with raises(ValueError): OptimalBinningSketch(gamma=-0.2) with raises(ValueError): OptimalBinningSketch(cat_cutoff=-0.2) with raises(TypeError): OptimalBinningSketch(cat_heuristic=1) with raises(TypeError): OptimalBinningSketch(special_codes={1, 2, 3}) with raises(ValueError): OptimalBinningSketch(split_digits=9) with raises(ValueError): OptimalBinningSketch(mip_solver="new_solver") with raises(ValueError): OptimalBinningSketch(time_limit=-2) with raises(TypeError): OptimalBinningSketch(verbose=1) def test_numerical_default(): optb = OptimalBinningSketch(sketch="gk", eps=1e-4) optb.add(x, y) optb.solve() assert optb.status == "OPTIMAL" optb.binning_table.build() assert optb.binning_table.iv == approx(5.04392547, rel=1e-2) optb.binning_table.analysis() assert optb.binning_table.gini == approx(0.87541620, rel=1e-2) assert optb.binning_table.js == approx(0.39378376, rel=1e-2) assert optb.binning_table.quality_score == approx(0.0, rel=1e-2) def test_numerical_default_merge(): optb1 = OptimalBinningSketch(sketch="gk", eps=1e-4) optb2 = OptimalBinningSketch(sketch="gk", eps=1e-4) x1, x2 = x[:200], x[200:] y1, y2 = y[:200], y[200:] optb1.add(x1, y1) optb2.add(x2, y2) optb1.merge(optb2) optb1.solve() assert optb1.status == "OPTIMAL" optb1.binning_table.build() assert optb1.binning_table.iv == approx(5.04392547, rel=1e-2) optb1.binning_table.analysis() assert optb1.binning_table.gini == approx(0.87541620, rel=1e-2) assert optb1.binning_table.js == approx(0.39378376, rel=1e-2) assert optb1.binning_table.quality_score == approx(0.0, rel=1e-2) def test_numerical_default_tdigest(): optb = OptimalBinningSketch(sketch="t-digest", eps=1e-4) optb.add(x, y) optb.solve() assert optb.status == "OPTIMAL" optb.binning_table.build() assert optb.binning_table.iv == approx(5.04392547, rel=1e-2) optb.binning_table.analysis() assert optb.binning_table.gini == approx(0.87541620, rel=1e-2) assert optb.binning_table.js == approx(0.39378376, rel=1e-2) assert optb.binning_table.quality_score == approx(0.0, rel=1e-2) def test_numerical_default_tdigest_merge(): optb1 = OptimalBinningSketch(sketch="t-digest", eps=1e-4) optb2 = OptimalBinningSketch(sketch="t-digest", eps=1e-4) x1, x2 = x[:200], x[200:] y1, y2 = y[:200], y[200:] optb1.add(x1, y1) optb2.add(x2, y2) optb1.merge(optb2) optb1.solve() 
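# After merging the second t-digest sketch into the first, solving the
# combined sketch should reproduce the same binning statistics (IV, Gini, JS)
# as the single-pass runs above, which is what the assertions below check.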
assert optb1.status == "OPTIMAL" optb1.binning_table.build() assert optb1.binning_table.iv == approx(5.04392547, rel=1e-2) optb1.binning_table.analysis() assert optb1.binning_table.gini == approx(0.87541620, rel=1e-2) assert optb1.binning_table.js == approx(0.39378376, rel=1e-2) assert optb1.binning_table.quality_score == approx(0.0, rel=1e-2) def test_categorical_default_user_splits(): x = np.array([ 'Working', 'State servant', 'Working', 'Working', 'Working', 'State servant', 'Commercial associate', 'State servant', 'Pensioner', 'Working', 'Working', 'Pensioner', 'Working', 'Working', 'Working', 'Working', 'Working', 'Working', 'Working', 'State servant', 'Working', 'Commercial associate', 'Working', 'Pensioner', 'Working', 'Working', 'Working', 'Working', 'State servant', 'Working', 'Commercial associate', 'Working', 'Working', 'Commercial associate', 'State servant', 'Working', 'Commercial associate', 'Working', 'Pensioner', 'Working', 'Commercial associate', 'Working', 'Working', 'Pensioner', 'Working', 'Working', 'Pensioner', 'Working', 'State servant', 'Working', 'State servant', 'Commercial associate', 'Working', 'Commercial associate', 'Pensioner', 'Working', 'Pensioner', 'Working', 'Working', 'Working', 'Commercial associate', 'Working', 'Pensioner', 'Working', 'Commercial associate', 'Commercial associate', 'State servant', 'Working', 'Commercial associate', 'Commercial associate', 'Commercial associate', 'Working', 'Working', 'Working', 'Commercial associate', 'Working', 'Commercial associate', 'Working', 'Working', 'Pensioner', 'Working', 'Pensioner', 'Working', 'Working', 'Pensioner', 'Working', 'State servant', 'Working', 'Working', 'Working', 'Working', 'Working', 'Commercial associate', 'Commercial associate', 'Commercial associate', 'Working', 'Commercial associate', 'Working', 'Working', 'Pensioner'], dtype=object) y = np.array([ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]) optb = OptimalBinningSketch(dtype="categorical", solver="mip", cat_cutoff=0.1, verbose=True) optb.add(x, y) optb.solve() assert optb.status == "OPTIMAL" def test_information(): optb = OptimalBinningSketch(solver="cp") with raises(NotSolvedError): optb.information() optb.add(x, y) optb.solve() with raises(ValueError): optb.information(print_level=-1) optb.information(print_level=0) optb.information(print_level=1) optb.information(print_level=2) optb = OptimalBinningSketch(solver="mip") optb.add(x, y) optb.solve() optb.information(print_level=2) def test_verbose(): optb = OptimalBinningSketch(verbose=True) optb.add(x, y) optb.solve() assert optb.status == "OPTIMAL" ```
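Taken together, these tests exercise one streaming workflow shared by `OptimalBinningSketch` and `BinningProcessSketch`: feed data incrementally with `add`, optionally `merge` sketches built on separate data partitions, then `solve` and inspect or `transform`. The snippet below is a minimal usage sketch assembled only from the calls exercised in the tests (`add`, `merge`, `solve`, `binning_table.build`, `transform`); it reuses the breast-cancer dataset purely for illustration and is not part of the test suite.
```
import pandas as pd
from sklearn.datasets import load_breast_cancer
from optbinning import OptimalBinningSketch

data = load_breast_cancer()
df = pd.DataFrame(data.data, columns=data.feature_names)
x, y = df["mean radius"].values, data.target

# Build two sketches on disjoint partitions, as two workers in a
# distributed job would, then merge them before solving.
optb_a = OptimalBinningSketch(sketch="gk", eps=1e-4)
optb_b = OptimalBinningSketch(sketch="gk", eps=1e-4)
optb_a.add(x[:200], y[:200])
optb_b.add(x[200:], y[200:])
optb_a.merge(optb_b)

optb_a.solve()                              # optimal binning on the merged sketch
optb_a.binning_table.build()                # binning table (IV, bins, etc.)
x_woe = optb_a.transform(x, metric="woe")   # WoE-transform the raw values
```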
{ "source": "JensGM/pyTurbSim", "score": 3 }
#### File: pyTurbSim/examples/newProfModel_example.py ```python from pyts.profModels.base import profModelBase class aNewModel(profModelBase): """ An example class for defining the mean velocity profile. This class must set the values of the '_u' property (memory allocated in profModelBase.__init__). This is the 3 x Nz x Ny mean velocity vector-field. The first index of '_u' is the velocity component. The indexes are: 0: u-component (out of y-z plane component) 1: v-component (lateral horizontal component) 2: w-component (vertical component) The second index of '_u' is the z-direction, and the third is the y-direction of the grid. """ def __init__(self, grid, coef_u, coef_w=[0.01, 0.2]): """ Each profile model should define the __init__ method. The __init__ method must take the *grid* as the first input parameter. All other input parameters can be specified to define the model. The *grid* input parameter is automatically added as an attribute of the profile model. This method should set all three components of the mean velocity field, '_u'. The components default to 0 if they are not set here. """ # In this example, we set the u-component to increase linearly with height: # Note: we are making use of the automatically added 'grid' attribute self._u[0] = coef_u * self.grid.z[:, None] # Arbitrarily chose a factor of 0.3 # Note that the 'grid' object of this TurbSim run is accessible in the # profile model. self.coef_w = coef_w # We can store variables for use in other methods. self._u[2] = self.calc_vertical_velocity() def calc_vertical_velocity(self): """ Define as many methods as you like for helping the __init__ method... """ # Note: again we make use of the automatically added 'grid' attribute, and the stored coef_w attribute. # Here we arbitrarily set the vertical velocity to increase in the # y-direction (not very realistic). return self.coef_w[0] * self.grid.y[None, :] + self.coef_w[1] ``` #### File: pyts/gui/grid.py ```python from matplotlib.figure import Figure import wx from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas import numpy as np from . import gTurbSim_wdr as gts_wdr from .base import ConfigFrame n_grid_max = 10000 # Maximum number of grid points (ny*nz). 
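# deltaTextCtrl (below) links three text controls (xCtrl, dxCtrl, nCtrl) --
# a span (x), a step (dx) and a count (n) -- so that editing any one of them
# recomputes one of the others: changing x or n re-derives dx, while changing
# dx re-derives either x or n depending on the x_depends_on_dx flag passed to
# the constructor.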
class deltaTextCtrl(object): @property def x(self,): return float(self.xCtrl.GetValue()) @x.setter def x(self, val): if val is None: self.xCtrl.ChangeValue(str(self.dx * (self.n - self.one))) else: self.xCtrl.SetValue(str(val)) @property def dx(self,): return float(self.dxCtrl.GetValue()) @dx.setter def dx(self, val): if val is None: self.dxCtrl.ChangeValue('%0.3f' % (self.x / (self.n - self.one))) else: self.dxCtrl.SetValue(str(val)) @property def n(self,): return int(self.nCtrl.GetValue()) @n.setter def n(self, val): if val is None: self.nCtrl.ChangeValue(str(int(self.x / self.dx) + self.one)) self.x = None else: self.nCtrl.SetValue(str(int(val))) def on_x(self, event): try: if self.x < 0: return self.dx = None self.parent.refresh(None) except: pass def on_n(self, event): try: if self.n < 1: return self.dx = None self.parent.refresh(None) except: pass def on_dx(self, event): try: if bool(self.mode): self.x = None self.parent.refresh(None) else: self.n = None except: pass def __init__(self, parent, xCtrl, dxCtrl, nCtrl, x_dflt=1.0, dx_dflt=None, n_dflt=None, x_depends_on_dx=True, plus_one=True): """ """ self.xCtrl = xCtrl self.dxCtrl = dxCtrl self.nCtrl = nCtrl self.mode = x_depends_on_dx self.one = plus_one self.parent = parent self.xCtrl.ChangeValue(str(x_dflt)) self.xCtrl.Bind(wx.EVT_TEXT, self.on_x) self.dxCtrl.Bind(wx.EVT_TEXT, self.on_dx) self.nCtrl.Bind(wx.EVT_TEXT, self.on_n) if dx_dflt is not None: self.dx = dx_dflt elif n_dflt is not None: self.n = n_dflt class gridFigure(object): def __init__(self, panel, dpi): self.dpi = dpi self.fig = Figure((1, 2), dpi=self.dpi) self.canvas = FigCanvas(panel, -1, self.fig) self.fig.set_facecolor([0.8, 0.8, 1]) self.axes = self.fig.add_axes([0.0, 0.1, 1.0, 0.9]) self.axbot = self.fig.add_axes([0.0, 0.0, 1.1, 0.1]) self.axbot.fill([0, 1, 1, 0], [0, 0, 1, 1], facecolor=[.58, .29, 0], edgecolor='none', transform=self.axbot.transAxes) self.axbot.set_axis_off() self.axes.set_aspect('equal') self.sizer = wx.BoxSizer(wx.VERTICAL) self.sizer.Add(self.canvas, 0, wx.EXPAND | wx.ALL) panel.SetSizer(self.sizer) panel.Fit() def draw(self, parent): p = parent try: zhub = float(p.inp_hubheight.GetValue()) except: # Don't do anything if we can't interpret the hubht box. return h = p.zCtrl.x w = p.yCtrl.x ny = p.yCtrl.n nz = p.zCtrl.n ax = self.axes ax.cla() self.axes.set_axis_off() y = np.arange(ny) * w / (ny - 1) - w / 2 z = np.arange(nz) * h / (nz - 1) - h / 2 + zhub y, z = np.meshgrid(y, z) ax.plot(0, zhub, 'ro', ms=7, mec='none') ax.plot(y.flatten(), z.flatten(), 'kx') ylm = [0, (zhub + h / 2) * 1.1] ax.set_ylim(ylm) xlm = [-1.05 * w / 2, w / 2 * 1.05] ax.set_xlim(xlm) self.canvas.draw() class gridConfigFrame(ConfigFrame): save_vars = { 'inp_ny': 'NumGrid_Y', 'inp_nz': 'NumGrid_Z', 'inp_height': 'GridHeight', 'inp_width': 'GridWidth', 'inp_dt': 'TimeStep', 'inp_time': 'UsableTime', 'inp_hubheight': 'HubHt', } def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.CLOSE_BOX | wx.CAPTION): wx.Frame.__init__(self, parent, id, title, pos, size, style) self.config = parent.config #szr=wx.BoxSizer(wx.VERTICAL | wx.NO_BORDER) self.panel = wx.Panel(self) # ,-1,style=wx.NO_BORDER) # This function was created using wxDesigner: gts_wdr.GridSetup(self.panel) # wxDesigner does not allow defining variables, but it does # allow names, so we use this as a hack. 
for obj in self.panel.Children: if (obj.GetName().startswith('inp_') or obj.GetName().startswith('btn_') or obj.GetName().startswith('pnl_')): setattr(self, obj.GetName(), obj) dpi = wx.ScreenDC().GetPPI() self.fig = gridFigure(self.pnl_grid, dpi[0]) self.fig.draw(self) # Set bindings: wx.EVT_CLOSE(self, self.OnCloseWindow) self.btn_cancel.Bind(wx.EVT_BUTTON, self.OnCloseWindow) self.btn_ok.Bind(wx.EVT_BUTTON, self.OnOk) self.inp_hubheight.Bind(wx.EVT_TEXT, self.refresh) # Initialize the field values and set bindings: self.zCtrl = deltaTextCtrl( self, self.inp_height, self.inp_dz, self.inp_nz, x_dflt=self.config['GridHeight'], n_dflt=self.config['NumGrid_Z']) self.yCtrl = deltaTextCtrl( self, self.inp_width, self.inp_dy, self.inp_ny, x_dflt=self.config['GridWidth'], n_dflt=self.config['NumGrid_Y']) self.tCtrl = deltaTextCtrl(self, self.inp_time, self.inp_dt, self.inp_nt, x_dflt=self.config['UsableTime'], dx_dflt=self.config['TimeStep'], x_depends_on_dx=False, plus_one=0) self.inp_hubheight.SetValue('%0.2g' % (self.config['HubHt'],)) self.Fit() ``` #### File: pyts/gui/prof.py ```python from matplotlib.figure import Figure from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas from matplotlib.ticker import MaxNLocator import numpy as np import wx from . import gTurbSim_wdr as gts_wdr from .base import ConfigFrame from ..runInput.main import cfg2tsrun import copy from ..main import tsrun from ..base import tsGrid class profFigure(object): def __init__(self, panel, dpi): self.dpi = dpi self.fig = Figure((1.9, 3.4), dpi=self.dpi) self.canvas = FigCanvas(panel, -1, self.fig) self.fig.set_facecolor('w') self.axes = self.fig.add_axes([0.24, 0.24, .7, 0.7]) self.sizer = wx.BoxSizer(wx.VERTICAL) self.sizer.Add(self.canvas, 0, wx.EXPAND | wx.ALL) panel.SetSizer(self.sizer) panel.Fit() def draw(self, parent): parent.inputs2config() ax = self.axes ax.cla() tsr = cfg2tsrun(copy.deepcopy(parent.config)) pr = tsr.prof ax.plot(pr.u[:, pr.grid.ihub[1]], pr.z, 'r+', ms=7) zmx = np.max(pr.z) tsrtmp = tsrun() nz = 100. tsrtmp.grid = tsGrid(center=tsr.grid.center, ny=3, dy=tsr.grid.width / 2, nz=nz, dz=zmx / nz, dt=1, nt=1000,) tsrtmp.prof = tsr.profModel tsrtmp.grid.zhub = tsrtmp.grid.center tsrtmp.grid.z = np.arange(zmx / 40, zmx * 1.1, zmx / nz) pr = tsrtmp.prof ax.plot(pr.u[:, 1], pr.z, 'k-', zorder=-5) ax.set_xlim([0, None]) ax.set_ylim([0, None]) ax.xaxis.set_major_locator(MaxNLocator(5)) self.axes.set_xlabel('u [m/s]') self.axes.set_ylabel('z [m]') self.canvas.draw() class profConfigFrame(ConfigFrame): # The keys in this list must match the input file option. # The keys in this dict should match the keys in the model_aliases dict. 
exclude_vars = { 'H2L': ['inp_plexp', 'inp_zjet'], 'LOG': ['inp_plexp', 'inp_zjet'], 'PL': ['inp_zjet', 'inp_ustar'], 'IEC': ['inp_zjet'], 'JET': ['inp_plexp', 'inp_ustar'], } save_vars = { 'cho_profmodel': 'WindProfileType', 'inp_refheight': 'RefHt', 'inp_refvel': 'URef', 'inp_ustar': 'UStar', 'inp_plexp': 'PLExp', 'inp_zjet': 'ZJetMax', 'inp_hflowang': 'HFlowAng', 'inp_vflowang': 'VFlowAng', } def init_layout(self,): self.panel = wx.Panel(self) # ,-1,style=wx.NO_BORDER) gts_wdr.profSetup(self.panel) @property def model(self,): return self.cho_profmodel.value def init_fig(self,): dpi = wx.ScreenDC().GetPPI() self.fig = profFigure(self.pnl_profile, dpi[0]) def set_aliases(self,): self.cho_profmodel.aliases = { 'H2L': ['H2O Log'], 'PL': ['POWER'], } def init_bindings(self,): wx.EVT_CLOSE(self, self.OnCloseWindow) self.btn_cancel.Bind(wx.EVT_BUTTON, self.OnCloseWindow) self.btn_ok.Bind(wx.EVT_BUTTON, self.OnOk) self.inp_zjet.Bind(wx.EVT_TEXT, self.refresh) self.inp_refheight.Bind(wx.EVT_TEXT, self.refresh) self.inp_refvel.Bind(wx.EVT_TEXT, self.refresh) self.inp_plexp.Bind(wx.EVT_TEXT, self.refresh) self.inp_hflowang.Bind(wx.EVT_TEXT, self.refresh) self.inp_vflowang.Bind(wx.EVT_TEXT, self.refresh) self.cho_profmodel.Bind(wx.EVT_CHOICE, self.update) ``` #### File: pyts/io/formatter.py ```python from string import Formatter class SuperFormatter(Formatter): r""" SuperFormatter adds the following capabilities: 1. Initialize with a template string, and the :meth:`__call__` method uses this string. Thus, example usage of this formatter looks like:: template = SuperFormatter(template_string) out_string = template(*args, **kwargs) 2. White space at the end of a format specifier is stripped. This allows for aligning text within the template. 3. Multiple format strings separated by ``|`` can be specified within a template. This formatter will loop over the format strings until it finds one that doesn't throw a ValueError. For example, the format string ``6d|<6.3f|8s`` will format the following objects as: +----------------+------------------+ | input | output string | +================+==================+ | ``3.74583754`` | ``'3.746 '`` | +----------------+------------------+ | ``384`` | ``' 384'`` | +----------------+------------------+ | ``None`` | ``'None '``| +----------------+------------------+ 4. Default values may be specified after a ``/`` at the end of the format string. For example if the container is ``{show_data:s/False}``, and there is no key ``show_data`` in ``**kwargs``, then ``False`` will fill that location. 5. The :attr:`format_prfx` attribute allows the user to define a default container prefix. This will be prepended to all format specifiers that are a single-character `type` specifier. For example if ``format_prfx = '<20'``, then the format specifier ``'f'`` will be changed to ``'<20f'``, but ``'>08.3f'`` will be unchanged. This is applied to each specifier within a multiple specification, thus ``'d|f|s'`` would actually be ``'<20d|<20f|<20s'``. 6. Custom format specifiers have been implemented by adding a hook that searches for a `_format_<specifier>` method prior to running the normal formatting routines. That method takes the value to be formatted as input (in addition to *self*), and should return the fully-formatted string (no further formatting is applied). 
For example, a custom format specifier `pet` (specified as ``{my_dog:pet}`` in the template) could be defined as:: class MyNewFormatter(SuperFormatter): def _format_pet(self, value): return value.upper() Note that this will throw an ``AttributeError`` if *my_dog* is an object without an ``upper`` method (i.e. not a string), but you could add to the method to handle all of the different types that ``value`` might be. 7. Custom format specifiers with arguments can be specified as ``{my_dogs:pets(10s,10s)}``. In this case the string inside the parenthesis is supplied as the second argument to the ``_format_pets`` method. The method that implements this format could be defined as:: class MyNewFormatter(SuperFormatter): def _format_pets(self, value, form2): out = '' for v,f in zip(value, form2.split(',')): out += format(v, f) return out """ format_prfx = '' default_format_prfx = '' allow_sloppy = False def __init__(self, template): # Override the base methods to initialize the formatter with # the template string. self.template = template def __call__(self, *args, **kwargs): r""" Format the template string with `*args` and `**kwargs`. """ return self.format(self.template, *args, **kwargs) def __iter__(self,): return self.parse(self.template) def get_value(self, key, args, kwargs): key = key.rstrip() self._current_name = key if isinstance(key, (int, long)): return args[key] else: try: return kwargs[key] except KeyError: return None def _fail(self): if self.allow_sloppy: return '??SOME JUNK??' else: # This _current_name business is a DIRTY HACK. raise KeyError("'%s' not specified and no default " "value found in template." % self._current_name) def _format_default(self, default_val): return format(default_val, self.default_format_prfx + 's') def format_field(self, value, format_spec): format_spec = format_spec.rstrip() # Strip trailing spaces default_val = None if '/' in format_spec: format_spec, default_val = format_spec.split('/', 1) # set the default value if there is no input if value is None: return self._format_default(default_val) elif value is None: return self._fail() if '|' in format_spec: format_spec = format_spec.split('|') else: format_spec = [format_spec] for form in format_spec: formtail = None if '(' in form and form.endswith(')'): form, formtail = form.split('(', 1) formtail = formtail[:-1] try: if hasattr(self, '_format_' + form): if formtail is None: return getattr(self, '_format_' + form)(value) else: return getattr(self, '_format_' + form)(value, formtail) if form in ["b", "c", "d", "e", "E", "f", "F", "g", "G", "n", "o", "s", "x", "X", "%", '']: form = self.format_prfx + form return format(value, form) except ValueError: pass except TypeError: pass # Finally, try the default again: if default_val is None: raise ValueError('Invalid conversion specification') return self._format_default(default_val) ``` #### File: pyts/io/main.py ```python import read readers = {'wnd': read.bladed, 'bl': read.bladed, 'bts': read.turbsim, } def readModel(fname, ): """ Read a TurbSim data and input file and return a :class:`tsdata <pyts.main.tsdata>` data object. Parameters ---------- fname : str The filename to load. If the file ends in: - .bl or .wnd, the file is assumed to be a bladed-format file. - .bts, the file is assumed to be a TurbSim-format file. Returns ------- tsdata : :class:`tsdata <pyts.main.tsdata>` The TurbSim data contained in the binary data file. """ for sfx, rdr in readers.iteritems(): if fname.endswith(sfx): return rdr(fname) # Otherwise try reading it as a .wnd file. 
read.bladed(fname) # This will raise an error if it doesn't work. ``` #### File: pyTurbSim/pyts/main.py ```python from .base import ts_complex, gridProps, dbg, np, statObj from .profModels.base import profModelBase, profObj from .specModels.base import specModelBase, specObj from .cohereModels.base import cohereModelBase, cohereObj, cohereUser from .stressModels.base import stressModelBase, stressObj from .phaseModels.api import randPhase from ._version import * from .io import write from numpy import random from numpy import ulonglong from numpy.fft import irfft import time # !!!VERSION_INCONSISTENCY # inconsistency between this and older versions of TurbSim # !!!CHECKTHIS # means I need to ensure that something is right. # !!!FIXTHIS # means I know I am doing something wrong. # !!!ADDDOC # Means add documentation here # TODO: # - Testing: # . Test 'user-defined' models # - Documentation # . Document plotting tools. ################ ## These require branches # - Fix Reynold's stress! # - Break Cholesky from coherence models/objects and into 'main', or tslib. # . How do callbacks work fortran->python? # . Can we implement this so that 'models/objects' are separated from tslib? # - Add 'mods': # . Add ability to rotate mean velocity field (for a prof instance and a profModel). # . Add ability to add veer to mean velocity field (prof instance and profModel). # - Write .sum summary files (io package), (so they are fully self-contained). # . Add parameter logging, so that we can write summary files that # track all parameters that were input. ################ ## Low priority # - Write FF files (tsio.py). # - Write HubHeight files (tsio.py). # - Add KHtest functionality? (rgrep for '#KHTEST') # - Write 'events' (includes adding 'coherent events' to TS) class tsrun(object): """ This is the PyTurbSim 'run' class. This class provides the interface for controlling PyTurbSim simulations and output. Examples of how to use this class, and the PyTurbSim interface in general can be found in the PyTurbSim /examples directory. Parameters ---------- RandSeed : int,optional ('random value') Initialize the run-object with a RandSeed. ncore : int,optional (1) Number of cores (processors) to use for the pyTurbSim run """ def __init__(self, RandSeed=None, ncore=1): """ PyTurbSim 'run' objects can be initialized with a specific random seed, `RandSeed`, and number of cores, `ncore`. """ # Initialize the random number generator before doing anything else. if RandSeed is None: self.RandSeed = random.randint(-2147483647, 2147483647) else: self.RandSeed = RandSeed # Seeds for numpy must be positive, but original-TurbSim had # negative seeds. In order to attempt to be consistent, we # use the values in the files but make them positive for the # numpy random generator. self.randgen = random.RandomState( ulonglong(self.RandSeed + 2147483648)) self.ncore = ncore if dbg: self.timer = dbg.timer('Veers84') # For now this is a place-holder, I may want to make this an # 'input property' eventually. phase = randPhase() @property def prof(self): """ This is the 'mean velocity profile' input property. This property returns a 'profObj'. This property can be defined with three types of objects: 1) define it with a 'profile model' (recommended):: ts_run.prof=a_prof_model In this case the model is set to my_ts_run.profModel, and this model is called to produce a profObj AS NEEDED. At the end of the ts_run call that profObj is cleared so that subsequent runs do not use a fixed profObj (i.e. 
in the case that the model is modified or another model/object that the profile model depends on is changed between runs). 2) define it with a profObj directly (profile statistic-object):: ts_run.prof=a_prof_model(ts_run) In this case the profObj is FIXED. That is, all subsequent PyTurbSim runs will utilize this profile, which is based on the state of the a_prof_model and ts_run at the time of the profObj creation. 3) define it with an array directly:: ts_run.prof=a_numpy_array [units: m/s] In this case the profObj is again fixed and defined by the input array. The numpy array dimensions must match those of the gridObj. That is, the dimensions of the array should be (3 x grid.n_z x grid.n_y). The first dimension is for each component of the profile (u,v,w), the next two are for each point (z,y) in the grid. See Also -------- pyts.profModels.api : to see available profile models. tsrun.spec tsrun.cohere tsrun.stress """ if hasattr(self, 'profModel') and not hasattr(self, '_prof'): self._prof = self.profModel(self) return self._prof @prof.setter def prof(self, val): if profModelBase in val.__class__.__mro__: self.profModel = val elif np.ndarray in val.__class__.__mro__: self._prof = profObj(self) self._prof.array[:] = val elif profObj in val.__class__.__mro__: self._prof = val else: raise Exception('The input must be a profile model, ' 'profile object or numpy array; it is none of these.') @prof.deleter def prof(self,): if hasattr(self, 'profModel'): del self._prof @property def spec(self): """ This is the 'tke spectrum' input property. This property always returns a `.specObj`. This property can be defined with three types of objects: 1) define it with a 'spectral model' (recommended):: ts_run.spec=a_spec_model In this case the model is set to my_ts_run.specModel, and this model is called to produce a specObj AS NEEDED. At the end of the ts_run call that specObj is cleared so that subsequent runs do not use a fixed specObj (i.e. in the case that another model/object that the spectral model depends on is changed between runs). 2) define it with a specObj directly:: ts_run.spec=a_spec_model(ts_run) In this case the specObj is FIXED. That is, all subsequent PyTurbSim runs will utilize this spectral model, which is based on the state of ts_run at the time of the specObj creation. 3) define it with an array directly:: ts_run.spec=a_numpy_array - [units: m^2/(s^2.Hz)] In this case the specObj is again fixed and defined by the input array. The numpy array dimensions must match those of the gridObj. That is, the dimensions of the array should be (3 x grid.n_z x grid.n_y x grid.n_f). The first dimension is for each component of the spectrum (u,v,w), the next two are for each point (z,y) in the grid, and the last dimension is the frequency dependence of the spectrum. See Also -------- pyts.specModels.api : to see available spectral models. tsrun.prof tsrun.cohere tsrun.stress """ if hasattr(self, 'specModel') and not hasattr(self, '_spec'): self._spec = self.specModel(self) return self._spec @spec.setter def spec(self, val): if specModelBase in val.__class__.__mro__: self.specModel = val elif np.ndarray in val.__class__.__mro__: self._spec = specObj(self) self._spec.array[:] = val elif specObj in val.__class__.__mro__: self._spec = val else: raise Exception('The input must be a spectral model, ' 'spectra object or numpy array; it is none of these.') @spec.deleter def spec(self,): if hasattr(self, 'specModel'): del self._spec @property def cohere(self): """ This is the 'coherence' input property. 
This property always returns a :class:`~.cohereModels.base.cohereObj`. Because the bulk of PyTurbSim's computational requirements (memory and processor time) are consumed by dealing with this statistic, it behaves somewhat differently from the others. In particular, rather than relying on arrays for holding data 'coherence objects' define functions that are called as needed. This dramatically reduces the memory requirements of PyTurbSim without increasing. See the cohereModels package documentation for further details. Fortunately, at this level, coherence is specified identically to other statistics... This property can be defined with three types of objects: 1) define it with a 'coherence model' (recommended):: ts_run.cohere=a_coherence_model In this case the model is set to my_ts_run.cohereModel, and this model sets the is called at runtime to produce the phase array. At the end of the ts_run call that phase array is cleared so that subsequent runs do not use a fixed phase information (i.e. in the case that the coherence model is modified or another model/object that the coherence model depends on is changed between runs). 2) define it with a cohereObj directly :: ts_run.spec=a_coherence_model(ts_run) In this case the cohereObj is FIXED. That is, all subsequent PyTurbSim runs will utilize this coherence model, which is based on the state of ts_run at the time of execution of this command. 3) define it with an array directly:: ts_run.cohere=a_numpy_array - [units: non-dimensional] In this case the coherence will be fixed and defined by this input array. The numpy array dimensions must match those of the gridObj. That is, the dimensions of the array should be (3 x grid.n_p x grid.n_p x grid.n_f). The first dimension is for each component of the spectrum (u,v,w), the next two are for each point-pair (z,y) in the grid, and the last dimension is the frequency dependence of the spectrum. This approach for specifying the coherence - while explicit and flexible - requires considerably more memory than the 'coherence model' approach. Furthermore using this approach one must be careful to make sure that the ordering of the array agrees with that of the 'flattened grid' (see the gridObj.flatten method, and/or the cohereUser coherence model for more information). See Also -------- pyts.cohereModels.api : to see a list of available coherence models. pyts.cohereModels.base.cohereUser : the 'user-defined' or 'array-input' coherence model. tsrun.prof tsrun.spec tsrun.stress """ if hasattr(self, 'cohereModel') and not hasattr(self, '_cohere'): self._cohere = self.cohereModel(self) return self._cohere @cohere.setter def cohere(self, val): if cohereModelBase in val.__class__.__mro__: self.cohereModel = val elif np.ndarray in val.__class__.__mro__: self.cohereModel = cohereUser(val) elif cohereObj in val.__class__.__mro__: self.cohere = val else: raise Exception('The input must be a coherence model, ' 'coherence object or numpy array; it is none of these.') @cohere.deleter def cohere(self,): if hasattr(self, 'cohereModel'): del self._cohere @property def stress(self): """ This is the Reynold's stress input property. This property always returns a :class:`.stressObj`. This property can be defined with three types of objects: 1) define it with a `specModel` (recommended):: ts_run.stress=a_stress_model In this case the model is set to my_ts_run.stressModel, and this model is called to produce a stressObj AS NEEDED. 
At the end of the ts_run call that stressObj is cleared so that subsequent runs do not use a fixed stressObj (i.e. in the case that another model/object that the stress model depends on is changed between runs). 2) define it with a `stressObj` directly:: ts_run.stress=a_stress_model(ts_run) In this case the stressObj is FIXED. That is, all subsequent PyTurbSim runs will utilize this stress model, which is based on the state of ts_run at the time of the stressObj creation. 3) define it with an array directly:: ts_run.stress=a_numpy_array - [units: m^2/s^2] In this case the stressObj is again fixed and defined by the input array. The numpy array dimensions must match those of the gridObj. That is, the dimensions of the array should be (3 x grid.n_z x grid.n_y). The first dimension is for each component of the stress (u,v,w), the next two are for each point (z,y) in the grid. See Also -------- pyts.stressModels.api : To see available stress models. tsrun.prof tsrun.spec tsrun.cohere """ if hasattr(self, 'stressModel') and not hasattr(self, '_stress'): self._stress = self.stressModel(self) return self._stress @stress.setter def stress(self, val): if stressModelBase in val.__class__.__mro__: self.stressModel = val elif np.ndarray in val.__class__.__mro__: self._stress = stressObj(self) self._stress.array[:] = val elif stressObj in val.__class__.__mro__: self._stress = val else: raise Exception('The input must be a stress model, ' 'stress object or numpy array; it is none of these.') @stress.deleter def stress(self,): if hasattr(self, 'stressModel'): del self._stress def reset(self, seed=None): """ Clear the input statistics and reset the Random Number generator to its initial state. """ del self.prof del self.spec del self.cohere del self.stress if seed is None: self.randgen.seed(self.RandSeed) else: self.randgen.seed(seed) @property def info(self,): """ Model names and initialization parameters. """ out = dict() out['version'] = (__prog_name__, __version__, __version_date__) out['RandSeed'] = self.RandSeed out['StartTime'] = self._starttime if hasattr(self, '_config'): out['config'] = self._config for nm in ['profModel', 'specModel', 'cohereModel', 'stressModel']: if hasattr(self, nm): mdl = getattr(self, nm) out[nm] = dict(name=mdl.model_name, description=mdl.model_desc, params=mdl.parameters, sumstring=mdl._sumfile_string(self), ) else: out[nm] = None out['RandSeed'] = self.RandSeed out['RunTime'] = time.time() - time.mktime(self._starttime) return out def run(self,): """ Run PyTurbSim. Before calling this method be sure to set the following attributes to their desired values: - :attr:`tsrun.prof`: The mean profile model, object or array. - :attr:`tsrun.spec`: The tke spectrum model, object or array. - :attr:`tsrun.cohere`: The coherence model, object or array. - :attr:`tsrun.stress`: The Reynold's stress model, object or array. Returns ------- tsdata : :class:`tsdata` """ self._starttime = time.localtime() self.timeseries = self._calcTimeSeries() out = self._build_outdata() return out __call__ = run def _build_outdata(self,): """ Construct the output data object and return it. """ out = tsdata(self.grid) out.uturb = self.timeseries out.uprof = self.prof.array out.info = self.info return out def _calcTimeSeries(self,): """ Compute the u,v,w, timeseries based on the spectral, coherence and Reynold's stress models. This method performs the work of taking a specified spectrum and coherence function and transforming it into a spatial timeseries. 
It performs the steps outlined in Veers84's [1]_ equations 7 and 8. Returns ------- turb : the turbulent velocity timeseries array (3 x nz x ny x nt) for this PyTurbSim run. Notes ----- 1) Veers84's equation 7 [1]_ is actually a 'Cholesky Factorization'. Therefore, rather than writing this functionality explicitly we call 'cholesky' routines to do this work. 2) This function uses one of two methods for computing the Cholesky factorization. If the Fortran library tslib is available it is used (it is much more efficient), otherwise the numpy implementation of Cholesky is used. .. [1] <NAME> (1984) 'Modeling Stochastic Wind Loads on Vertical Axis Wind Turbines', Sandia Report 1909, 17 pages. """ grid = self.grid tmp = np.zeros((grid.n_comp, grid.n_z, grid.n_y, grid.n_f + 1), dtype=ts_complex) if dbg: self.timer.start() # First calculate the 'base' set of random phases: phases = self.phase(self) # Now correlate the phases at each point to set the Reynold's stress: phases = self.stress.calc_phases(phases) # Now correlate the phases between points to set the spatial coherence: phases = self.cohere.calc_phases(phases) # Now multiply the phases by the spectrum... tmp[..., 1:] = np.sqrt(self.spec.array) * grid.reshape(phases) # and compute the inverse fft to produce the timeseries: ts = irfft(tmp) if dbg: self.timer.stop() # Select only the time period requested: # Grab a random number of where to cut the timeseries. i0_out = self.randgen.randint(grid.n_t - grid.n_t_out + 1) ts = ts[..., i0_out:i0_out + grid.n_t_out] / (grid.dt / grid.n_f) ** 0.5 ts -= ts.mean(-1)[..., None] # Make sure the turbulence has zero mean. return ts class tsdata(gridProps): """ TurbSim output data object. In addition to the output of a simulation (velocity timeseries array) it also includes all information for reproducing the simulation. Parameters ---------- grid : :class:`gridObj` TurbSim data objects are initialized with a TurbSim grid. """ @property def _sumdict(self): out = dict() # Start by pulling values from the config file # if there was one. 
if 'config' in self.info: out.update(self.info['config']) uhub = out['uhub'] = statObj(self.uhub) out['vhub'] = statObj(self.vhub, uhub.mean) out['whub'] = statObj(self.whub, uhub.mean) out['hhub'] = statObj(np.sqrt(self.uhub ** 2 + self.vhub ** 2)) out['grid'] = self.grid out['upvp'] = statObj(self.uhub * self.vhub) out['upwp'] = statObj(self.vhub * self.whub) out['vpwp'] = statObj(self.vhub * self.whub) out['upvp'].scale = 1 out['upwp'].scale = 1 out['vpwp'].scale = 1 out['tke'] = statObj((self.uturb ** 2).sum(0)) out['tke'] = statObj((self.uturb ** 2).sum(0)) out['ctke'] = statObj(0.5 * np.sqrt( (self.uturb[0] * self.uturb[1]) ** 2 + (self.uturb[0] * self.uturb[2]) ** 2 + (self.uturb[1] * self.uturb[2]) ** 2)) out['u_sigma'] = self.uturb[0].flatten().std() out['v_sigma'] = self.uturb[1].flatten().std() out['w_sigma'] = self.uturb[2].flatten().std() out['TurbModel_desc'] = self.info['specModel']['description'] out['RandSeed1'] = self.info['RandSeed'] out['profModel_sumstring'] = self.info['profModel']['sumstring'] out['specModel_sumstring'] = self.info['specModel']['sumstring'] out['stressModel_sumstring'] = self.info['stressModel']['sumstring'] out['cohereModel_sumstring'] = self.info['cohereModel']['sumstring'] out['ver'] = ver out['NowDate'] = time.strftime('%a %b %d, %Y', self.info['StartTime']) out['NowTime'] = time.strftime('%H:%M:%S', self.info['StartTime']) out['RunTime'] = self.info['RunTime'] out['FreqNyquist'] = self.f[-1] out['GridBase'] = self.grid.z[0] out['HeightOffset'] = 0.0 # Is this correct? out['ydata'] = self.grid.y out['z_ustd'] = np.concatenate((self.grid.z[:, None], self.uturb[0].std(-1)), axis=1) out['z_vstd'] = np.concatenate((self.grid.z[:, None], self.uturb[1].std(-1)), axis=1) out['z_wstd'] = np.concatenate((self.grid.z[:, None], self.uturb[2].std(-1)), axis=1) u, v, w = self.uprof.mean(-1)[:, :, None] out['WINDSPEEDPROFILE'] = np.concatenate(( self.grid.z[:, None], np.sqrt(u ** 2 + v ** 2), np.angle(u + 1j * v) * 180 / np.pi, u, v, w, ), axis=1) out['HFlowAng'] = np.angle(self.uprof[0][self.ihub] + 1j * self.uprof[1][self.ihub]) out['VFlowAng'] = np.angle(self.uprof[0][self.ihub] + 1j * self.uprof[2][self.ihub]) out['TurbModel'] = self.info['specModel']['name'] out['gridheader'] = '--------- ' * self.grid.n_y for nm in ['Zref', 'RefHt', 'ZRef', ]: if nm in self.info['profModel']['params']: out['RefHt'] = self.info['profModel']['params'][nm] for nm in ['URef', 'Uref', ]: if nm in self.info['profModel']['params']: out['URef'] = self.info['profModel']['params'][nm] out['PLExp'] = self.info['profModel']['params'].get('PLexp', None) return out def __getitem__(self, ind): if not hasattr(ind, '__len__'): ind = [ind] else: list(ind) for idx, val in enumerate(ind): if val.__class__ is not slice: ind[idx] = slice(val, val + 1) out = type(self)(self.grid[ind]) ind = [slice(None)] + list(ind) out.uturb = self.uturb[ind] out.uprof = self.uprof[ind] return out @property def parameters(self,): out = {} if hasattr(self, 'info'): for nm in ['profModel_params', 'specModel_params', 'cohereModel_params', 'stressModel_params']: if nm in self.info: out.update(self.info[nm]) return out def __init__(self, grid): """ Initialize a tsdata object with a grid object. """ self.grid = grid @property def shape(self,): """ The shape of the turbulence time-series (output) array. """ return self.uturb.shape @property def ihub(self,): """ The index of the hub. """ return self.grid.ihub @property def time(self,): """ The time vector, in seconds, starting at zero. 
""" if not hasattr(self, '_time'): self._time = np.arange(0, self.uturb.shape[-1] * self.dt, self.dt) return self._time def __repr__(self,): return ('<TurbSim data object:\n' '%d %4.2fs-timesteps, %0.2fx%0.2fm (%dx%d) z-y grid (hubheight=%0.2fm).>' % (self.uturb.shape[-1], self.dt, self.grid.height, self.grid.width, self.grid.n_z, self.grid.n_y, self.grid.zhub)) @property def utotal(self,): """ The total (mean + turbulent), 3-d velocity array """ return self.uturb + self.uprof[:, :, :, None] @property def u(self,): """ The total (mean + turbulent), u-component of velocity. """ return self.uturb[0] + self.uprof[0, :, :, None] @property def v(self,): """ The total (mean + turbulent), v-component of velocity. """ return self.uturb[1] + self.uprof[1, :, :, None] @property def w(self,): """ The total (mean + turbulent), w-component of velocity. """ return self.uturb[2] + self.uprof[2, :, :, None] @property def UHUB(self,): """ The hub-height mean velocity. """ return self.uprof[0][self.ihub] @property def uhub(self,): """ The hub-height u-component time-series. """ return self.u[self.ihub] @property def vhub(self,): """ The hub-height v-component time-series. """ return self.v[self.ihub] @property def whub(self,): """ The hub-height w-component time-series. """ return self.w[self.ihub] @property def tke(self,): """ The turbulence kinetic energy. """ return (self.uturb ** 2).mean(-1) @property def ctke(self,): return 0.5 * np.sqrt((self.stress ** 2).mean(-1).sum(0)) @property def Ti(self,): """ The turbulence intensity, std(u')/U, at each point in the grid. """ return np.std(self.uturb[0], axis=-1) / self.uprof[0] @property def stress(self,): """ The Reynold's stress tensor. """ if not hasattr(self, '_dat_stress'): self._stress_dat = np.concatenate( (np.mean(self.uturb[0] * self.uturb[1], axis=-1)[None], np.mean(self.uturb[0] * self.uturb[2], axis=-1)[None], np.mean(self.uturb[1] * self.uturb[2], axis=-1)[None]), 0) return self._stress_dat @property def upvp_(self,): """ The u'v' component of the Reynold's stress. """ return self.stress[0] @property def upwp_(self,): """ The u'w' component of the Reynold's stress. """ return self.stress[1] @property def vpwp_(self,): """ The v'w' component of the Reynold's stress. """ return self.stress[2] @property def stats(self,): """ Compute and return relevant statistics for this turbsim time-series. Returns ------- stats : dict A dictionary containing various statistics of interest. """ slc = [slice(None)] + list(self.ihub) stats = {} stats['Ti'] = self.tke[slc] / self.UHUB return stats def write_formatted(self, filename): """ Save the data in this tsdata object in 'formatted' format (.u, .v, .w files). Parameters ---------- filename : string '.u', '.v', and '.w' will be appended to the end of the filename. """ write.formatted(filename, self) def write_bladed(self, filename): """ Save the data in this tsdata object in 'bladed' format (.wnd). Parameters ---------- filename : str The filename to which the data should be written. """ write.bladed(filename, self) def write_turbsim(self, filename): """Save the data in this tsdata object in 'TurbSim' format. Parameters ---------- filename : str The filename to which the data should be written. """ write.turbsim(filename, self) def write_sum(self, filename): """ Currently PyTurbSim does not support writing summary (.sum) files. """ write.sum(filename, self._sumdict) if write.h5py is not None: def write_hdf5(self, filename): """Save the data in this tsdata object as an hdf5 file. 
Parameters ---------- filename : str The filename to which the data should be written. """ write.hdf5(filename, self) ``` #### File: pyts/profModels/api.py ```python A polynomial mean-velocity profile, where coefficients can be specified at initialization. \""" def __init__(self,Uref,Zref,poly_coefs=[1.,0.]): # All units should be in meters and seconds. self.Uref=Uref self.Zref=Zref self.poly_coefs=poly_coefs def __call__(self,tsrun): out=pm.profObj(tsrun) # Note here that tsrun contains the 'grid information', and # we can use that information to construct the profObj. out[0]=self.Uref*numpy.polyval(self.poly_coefs,tsrun.grid.z/Zref) # Here we have set the u-component (index 0) to follow the # polynomial, and we leave the other components to be zero. return out That's all that is required to define a new profile model! Now, assuming you have already created a :class:`.tsrun` instance (e.g. `tsr`) you set that :class:`.tsrun` to use your new model by simply doing, for example: >>> tsr.prof = my_new_model(3.,20,[.5,0]) Now your PyTurbSim run will utilize your newly defined model! Notes ----- For a description of the difference between 'profile models' (e.g. 'my_prof_model' in example above) and the profile they output (tsrun.prof), see the :doc:`../code-framework` section of the PyTurbSim documentation. """ from . import log as log_models from . import power as power_models from . import iec as iec_models #from jet import main as jet from . import simple from .base import profModelBase # Alias the log models h2l = log_models.H2O log = log_models.nwtc # Alias the power-law model pl = power_models.nwtc # Alias the iec model. iec = iec_models.main uniform = simple.uniform linear = simple.linear ``` #### File: pyts/profModels/jet.py ```python from .base import profModelBase, np, profObj from numpy.polynomial.chebyshev import chebval class main(profModelBase): """Low-level jet wind profile model. Parameters ---------- URef : float Reference velocity for the wind profile [m/s]. ZRef : float Reference height of the reference velocity [m]. UStar : float The bottom-boundary friction velocity [m/s]. Ri : float The Richardson number stability parameter. zjet_max : float, optional The maximum height of the jet. If a value is not specified, the zjet_max property provides a default. """ def __init__(self, URef, ZRef, UStar, Ri, zjet_max=None): self.URef = URef self.ZRef = ZRef self.UStar = UStar self.Ri = Ri self.zjet_max = zjet_max def _sumfile_string(self,): sumstring_format = """ Profile model used = {dat.model_desc} Reference velocity (URef) = {dat.Uref:0.2f} Reference height (ZRef) = {dat.ZRef:0.2f} Richardson Number (RICH_NO) = {dat.Ri:0.2f} Jet Height (ZJet_Max) = {dat.zjet_max:0.2f} """ return sumstring_format.format(dat=self,) def __call__(self, tsrun): """ Create and calculate the mean-profile object for a `tsrun` instance. Parameters ---------- tsrun : :class:`.tsrun` A TurbSim run object. Returns ------- out : :class:`.profObj` A jet wind-speed profile for the grid in `tsrun`. 
""" out = profObj(tsrun) u, v = self._model(out) out[0], out[1] = u[:, None], v[:, None] return out def _model(self, out): z = out.grid.z HtIndx = min(max(int(self.zjet_max - 50) / 20 - 1, 0), 20) scoef = spd_coefs[HtIndx] dcoef = dir_coefs[HtIndx] prms = np.array([0, self.Ri, self.UStar, 1]) if self.zjet_max == self.ZRef: prms[0] = self.URef else: utmp1 = chebval( self.ZRef, np.dot(scoef[:, 1:], prms[1:][:, None])) utmp2 = chebval(self.ZRef, scoef[:, 0]) prms[0] = (self.URef - utmp1) / utmp2 scoef = np.dot(scoef, prms[:, None]) # These are now vectors dcoef = np.dot(dcoef, prms[:, None]) # These are now vectors ang = chebval(z, dcoef[:, 0]) ang -= ang[out.grid.ihub[0]] # The hub-height angle should be zero. ang[ang < -45.], ang[ang > 45.] = - \ 45., 45. # No angle should be more than 45 degrees. tmpdat = chebval(z, scoef) * np.exp(1j * np.pi / 180. * ang)[:, None] return tmpdat.real, tmpdat.imag @property def zjet_max(self,): """ The value of the jet height. This property calculates a default value if one is not specified. """ if self._val_zjet_max is not None: return self._val_zjet_max val = 1.9326 * \ (-14.820 * self.Ri + 56.488123 * self.zL + 166.499069 * self.UStar + 188.253377) - 252.7267 rnd = min( max(self.grid.randgen.standard_cauchy(1) * 10 - 20, -160), 120) # !!!VERSION_INCONSISTENCY: I've used the standard # !!!Cuachy distribution, rather than 'PearsonIV' # !!!from indecipherable code. val += rnd self._val_zjet_max = val return val @zjet_max.setter def zjet_max(self, val): self._val_zjet_max = val ### These are the 'Chebyshef' coefficients, copied from the Modules.f90 file of TurbSim v1.x. ### The coefficients are: ## Column Definition ## 1 Jet maximum wind speed coefficient ## 2 Turbine Richardson number coefficient ## 3 uStar over the rotor coefficient ## 4 constant ## The rows are each of the 0-10th Chebyshev basis functions. spd_coefs = np.array([ [ # CASE ( 1 ) ! 70-90 m [0.856851, 7.51E-02, 1.39276, 0.894127], [-4.88E-02, 0.576344, 1.23582, 1.72687], [1.39E-02, 9.67E-02, 1.36737, -0.723851], [0.100585, 0.234968, -1.06287, -0.372353], [-7.69E-02, -0.154071, -0.301483, 0.150179], [8.53E-03, 0.104602, -0.382453, 0.520224], [-4.44E-03, -4.80E-02, 0.219135, -0.266775], [2.63E-02, -3.08E-02, -6.94E-02, -0.210521], [-2.01E-02, -5.61E-02, 0.220825, 0.179622], [8.11E-03, 3.96E-02, 0.109793, -3.81E-02], [4.99E-03, 5.00E-02, -0.124887, -0.11035], ], [ # CASE ( 2 ) ! 90-110 m [0.741241, -0.122521, 0.875062, 1.43294], [-0.264131, 0.28827, 0.717571, 3.30541], [-5.92E-02, 3.86E-02, 1.09453, -0.377399], [0.13792, 0.175628, -0.57163, -0.539205], [-2.59E-02, -0.211126, -4.25E-02, -0.338308], [-1.02E-02, 0.153597, -0.197867, 0.570708], [-3.22E-02, -8.17E-02, -9.63E-02, 0.19095], [2.72E-02, 3.09E-02, -0.249399, -0.273684], [-1.60E-02, 8.88E-03, 0.132523, 9.58E-02], [-5.29E-03, 2.98E-02, 0.205812, 9.27E-02], [7.00E-03, -1.47E-02, -2.11E-02, -0.123083], ], [ # CASE ( 3 ) ! 110-130 m [0.809492, -1.41752, -0.817619, 1.64159], [-0.121866, -1.09012, -2.60044, 3.63875], [-0.105142, -0.263657, -5.60E-02, 0.374811], [8.33E-02, 0.625103, 0.422112, -0.199598], [-1.69E-02, -7.09E-02, 1.76933, -0.847721], [1.88E-02, 7.70E-02, -0.121062, 0.10533], [-3.15E-02, 2.50E-02, -7.39E-02, 0.299197], [3.48E-03, 4.25E-02, -6.52E-02, -4.29E-03], [-1.18E-02, -0.100754, 0.170602, 3.42E-02], [2.09E-02, 3.36E-02, -0.104123, -8.49E-02], [-2.91E-03, -3.52E-02, -0.258115, 4.81E-02], ], [ # CASE ( 4 ) ! 
130-150 m [0.694325, -0.463252, 2.11406, 1.28643], [-0.269118, -1.31381, 2.13374, 3.46187], [-8.40E-02, -5.97E-02, 2.09803, -0.592335], [0.135657, -0.117732, -0.11134, -0.28161], [-1.29E-02, -0.239685, 0.151264, -0.412806], [3.54E-02, 0.513824, 0.673662, -0.519536], [-1.55E-02, 7.49E-03, 0.393002, 2.07E-02], [2.37E-02, 0.225841, 3.84E-02, -0.202507], [-3.26E-02, -0.239615, -0.133893, 0.29135], [1.52E-02, 7.15E-02, 0.25228, -0.113016], [7.19E-03, 9.79E-02, 0.252125, -0.173201], ], [ # CASE ( 5 ) ! 150-170 m [0.909534, 0.581254, -2.90539, -0.581377], [0.155834, -0.836954, -6.77075, 0.627044], [-8.99E-02, -5.28E-02, -2.0719, 2.44E-02], [7.01E-02, -0.152904, -0.348237, 0.460754], [-1.78E-02, -0.263166, 0.375798, -0.215738], [9.70E-03, 0.254932, 0.449286, -0.234], [7.46E-03, -0.304057, -0.122661, -7.14E-03], [-6.26E-03, -0.142341, -1.95E-02, 0.299841], [-2.59E-02, 0.174282, 0.193868, -5.81E-03], [2.54E-03, -8.22E-02, 1.84E-02, 6.77E-02], [5.77E-04, -5.43E-02, -7.69E-02, 2.96E-02], ], [ # CASE ( 6 ) ! 170-190 m [0.885753, -1.15015, 0.155218, -0.707043], [-2.53E-02, -2.65126, 0.850151, 1.85279], [-7.23E-02, -0.399161, 0.142486, -0.917176], [3.78E-02, 0.178924, 0.227745, 0.528861], [-6.43E-03, 5.42E-02, 0.359052, -0.26111], [5.33E-02, 0.1546, -0.335116, -0.602604], [-6.50E-03, -0.205907, -8.59E-02, 8.16E-02], [3.16E-02, 0.151199, -0.126411, -0.148609], [-3.95E-02, 0.127418, 0.158511, 0.20932], [-2.53E-02, -5.32E-02, 0.36536, 0.214466], [4.03E-03, 1.02E-02, -7.01E-03, -4.32E-02], ], [ # CASE ( 7 ) ! 190-210 m [0.735269, -1.48574, 0.983734, 0.887351], [0.233065, -0.850536, -1.17754, -0.880493], [-0.172346, -0.862128, 1.20075, 3.48E-02], [8.04E-02, 5.24E-02, -0.916548, 0.247144], [2.88E-02, 0.112064, 1.51E-04, -0.466186], [-2.75E-02, -9.01E-02, -0.321617, 0.379162], [-1.08E-02, -0.161368, -2.51E-04, -1.33E-02], [5.09E-02, 0.228507, 0.195942, -0.45807], [-1.98E-02, -7.23E-02, 6.66E-02, 0.133182], [-5.57E-03, -5.31E-02, 2.44E-02, 5.60E-02], [3.71E-03, -1.63E-02, -5.44E-02, -1.40E-02], ], [ # CASE ( 8 ) ! 210-230 m [0.723721, -0.691359, -0.147971, 1.16041], [0.18799, 0.370199, 0.354538, -0.494962], [-0.204727, -0.166723, 0.682431, 0.367566], [1.40E-02, 0.334677, 0.169944, 0.494211], [3.84E-02, 0.258361, 0.389453, -0.625709], [-6.62E-03, -2.19E-02, -0.606278, 0.205521], [-2.54E-02, -0.17744, 7.49E-02, 7.61E-02], [5.03E-02, 7.97E-02, -9.98E-02, -0.312218], [-2.25E-02, 2.20E-02, 0.263227, 0.123311], [-1.43E-02, -2.01E-02, -5.14E-02, 0.159391], [2.64E-03, 3.46E-02, -0.12318, -2.22E-02], ], [ # CASE ( 9 ) ! 230-250 m [0.717665, -0.294178, -0.521541, 0.876418], [0.183182, -0.52658, -1.34668, 0.414396], [-0.196162, 9.84E-02, -3.83E-02, 0.156018], [2.92E-02, -0.362193, -0.658593, 0.521854], [3.37E-02, 0.108203, 0.318667, -0.375309], [-8.24E-03, 0.128457, -0.149225, 0.1621], [-3.06E-02, -0.210106, 4.55E-02, 8.42E-02], [3.02E-02, 0.184626, 9.46E-02, -0.215191], [7.03E-03, 2.49E-02, 3.13E-02, -9.70E-02], [-3.06E-03, -4.82E-02, -9.70E-02, 5.82E-02], [-9.57E-03, -3.93E-02, -0.125623, 0.112639], ], [ # CASE ( 10 ) ! 250-270 m [0.786229, -0.164848, 0.244948, -0.126263], [0.15218, -0.153233, -0.558524, 0.84425], [-0.130716, -0.217411, 0.13439, -0.536893], [1.70E-03, 5.49E-02, 0.551012, 0.335778], [2.47E-02, 2.82E-02, 0.290918, -0.223416], [1.48E-02, 5.94E-02, -0.277959, 3.91E-02], [-4.43E-02, 6.99E-03, 0.302386, 0.123719], [2.07E-02, 4.05E-02, -0.256155, -5.84E-02], [4.51E-03, -4.37E-02, -0.111911, -9.20E-03], [4.05E-03, -6.90E-03, 0.14697, -7.03E-02], [-6.68E-03, 1.53E-02, -2.55E-02, 4.97E-02], ], [ # CASE ( 11 ) ! 
270-290 m [0.715734, -0.772062, -0.556396, 1.02929], [0.322509, -0.465616, -0.671711, -1.2413], [-0.166728, -0.281268, 0.924893, -0.282907], [1.27E-02, -0.342767, -1.10823, 0.516431], [3.80E-02, 5.35E-03, 0.833719, -0.510102], [1.97E-02, 0.279705, -0.179026, -4.36E-02], [-4.74E-02, -0.227673, 9.00E-02, 0.341958], [8.99E-03, -1.92E-02, -0.433969, 5.90E-02], [4.34E-03, 8.12E-02, 0.25764, -0.148492], [1.03E-02, 3.24E-02, 0.141971, -0.105207], [-4.84E-03, -1.99E-02, 7.33E-02, 2.84E-02], ], [ # CASE ( 12 ) ! 290-310 m [0.723348, -0.289581, -1.10618, 0.970713], [0.283383, 1.12986, -0.152861, -0.653269], [-0.16513, 0.295047, 0.245326, -7.06E-02], [8.55E-03, 9.38E-02, -0.826824, 0.283436], [3.45E-02, 0.364581, 0.566317, -0.521081], [2.83E-02, 0.107252, -0.124867, -4.80E-02], [-3.57E-02, -0.230151, -6.88E-02, 0.231208], [5.62E-04, 1.40E-02, -0.334942, 0.121313], [-6.35E-03, -6.19E-02, 0.139396, 2.77E-02], [1.14E-02, -2.67E-02, 0.24201, -0.127337], [1.71E-04, -6.37E-04, 4.39E-02, -5.61E-03], ], [ # CASE ( 13 ) ! 310-330 m [0.736987, -0.103727, 9.95E-02, 0.343208], [0.28285, 0.370583, 1.17749, -0.490259], [-0.130451, -0.557928, -0.272771, -0.230816], [-1.83E-02, 1.00E-01, -0.367321, 0.486971], [2.66E-02, -0.149206, 0.365342, -0.318809], [4.16E-02, 3.60E-02, -0.801161, 6.00E-06], [-2.36E-02, 1.96E-04, 0.340449, 2.72E-02], [1.30E-03, 0.214384, 0.125371, -8.47E-02], [-1.23E-02, 4.75E-02, 0.182118, 1.78E-02], [4.63E-03, -0.1309, -0.130584, 2.35E-02], [9.03E-04, -6.18E-02, -7.85E-03, 1.17E-02], ], [ # CASE ( 14 ) ! 330-350 m [0.706488, -1.21766, 1.08617, 0.674247], [0.341777, 2.27476, 3.81434, -2.32363], [-0.112822, 7.53E-02, 0.221349, -0.700428], [-1.99E-02, -1.95E-02, 0.947788, 4.68E-02], [3.08E-02, 0.334947, 0.10847, -0.534662], [5.21E-02, 0.349056, -1.14517, -0.147474], [-1.67E-02, -0.143994, -0.409398, 0.228081], [-1.75E-03, -0.115198, 3.23E-03, 0.100094], [-2.30E-02, -5.63E-02, 0.168561, 0.159537], [-6.41E-03, -8.48E-02, 0.135087, 8.81E-02], [1.13E-03, 2.07E-02, 9.18E-02, -3.77E-02], ], [ # CASE ( 15 ) ! 350-370 m [0.721629, -0.941544, 0.923908, 0.543678], [0.346956, -0.281582, -2.32358, -0.244435], [-0.109484, 0.275053, 0.86928, -0.771081], [-3.96E-02, -0.790621, -8.84E-02, 0.723378], [1.59E-02, -0.394222, -0.479505, -8.67E-02], [2.68E-02, 0.466895, 0.522378, -0.263669], [-9.57E-03, -8.52E-02, 1.11E-02, 3.20E-02], [3.46E-04, -5.34E-02, 0.15998, 0.108225], [-1.10E-02, -0.116864, -6.06E-02, 6.09E-02], [-2.93E-03, 2.72E-02, 5.08E-02, 7.50E-03], [-2.04E-03, -2.07E-02, -3.07E-02, 3.58E-02], ], [ # CASE ( 16 ) ! 370-390 m [0.732127, -2.66819, -7.94E-02, 0.676096], [0.285167, 3.89442, -0.917426, 0.104248], [-8.38E-02, 0.235268, -2.19E-03, -0.914663], [-3.98E-02, -0.858603, -0.538194, 0.843739], [-1.64E-02, 0.287007, -5.39E-02, 0.108834], [3.31E-02, 0.218726, 0.175636, -0.329844], [3.10E-05, -6.89E-02, 3.76E-02, -4.73E-02], [1.06E-02, -5.03E-02, 1.99E-02, 3.74E-02], [-1.05E-02, 9.92E-02, 0.11293, 2.26E-02], [-2.99E-03, -0.106831, 0.122628, 1.83E-02], [-7.32E-03, 3.52E-02, -3.36E-02, 8.59E-02], ], [ # CASE ( 17 ) ! 
390-410 m [0.707698, 0.119876, 0.427545, 0.2468], [0.307273, 0.428003, -3.09224, 1.01117], [-7.33E-02, 0.51572, -0.229086, -0.792402], [-4.73E-02, 8.49E-02, -0.52415, 0.571084], [-2.83E-02, 0.165455, -0.691726, 0.349932], [2.17E-02, 0.258434, 0.170597, -0.236707], [-4.59E-03, -0.130722, 0.182955, -3.40E-02], [1.82E-02, 9.79E-02, 0.189511, -0.158597], [-7.84E-04, -2.50E-02, 0.137171, -5.77E-02], [-2.91E-03, -4.84E-02, 0.168698, 8.22E-03], [-4.67E-03, 1.75E-03, 1.80E-02, 4.41E-02], ], [ # CASE ( 18 ) ! 410-430 m [0.688761, -0.7286, -1.55711, 1.27145], [0.300421, 0.633115, 0.881706, -8.38E-03], [-6.81E-02, 0.210301, 0.610772, -0.714435], [-5.93E-02, -0.373997, -0.593894, 1.01556], [-4.26E-02, -2.45E-02, -0.400705, 0.399717], [1.39E-02, 6.09E-02, -0.161239, -3.06E-02], [-4.41E-03, -1.98E-02, 0.293288, -0.110401], [1.42E-02, 8.22E-02, -1.50E-02, -1.54E-02], [6.30E-03, -1.50E-02, -7.57E-02, -7.10E-02], [2.19E-03, -2.59E-02, 8.53E-02, -2.29E-02], [-2.76E-03, 1.68E-02, -8.77E-02, 3.27E-02], ], [ # CASE ( 19 ) ! 430-450 m [0.659495, -0.22327, -1.75403, 1.65777], [0.384097, 1.06351, 2.53779, -1.63428], [-2.42E-02, 0.113735, -1.42805, -0.690773], [-3.30E-02, 8.60E-02, -1.00836, 0.764307], [-2.76E-02, 0.297567, 0.697445, -0.187071], [1.21E-02, 0.212621, -0.570822, 1.23E-02], [-2.22E-02, 0.166286, 0.50751, 1.87E-02], [1.52E-02, 5.81E-02, -0.256912, -5.10E-02], [2.11E-03, -1.45E-02, -8.94E-02, -2.00E-02], [3.06E-03, 1.60E-02, 7.45E-02, -3.77E-02], [-1.84E-04, -1.56E-02, -6.25E-02, 1.57E-02], ], [ # CASE ( 20 ) ! 450-470 m [0.64099, -2.02496, 0.427597, 1.52166], [0.391609, 2.03441, -0.122486, -1.03579], [8.28E-03, 0.5942, -0.42469, -1.35655], [-2.54E-02, -0.826812, -0.812187, 0.911776], [-2.77E-02, -9.73E-03, 0.315974, 2.34E-02], [1.37E-02, 0.365984, 0.141952, -0.299349], [-1.95E-02, -0.406182, 2.32E-02, 0.184752], [7.34E-03, 8.54E-02, -0.255458, 7.08E-02], [1.54E-03, 5.82E-02, -5.72E-02, -6.37E-02], [5.11E-03, -6.11E-02, -7.04E-03, -3.64E-02], [1.97E-03, -1.09E-02, -8.18E-02, -6.03E-03], ], [ # CASE ( 21 ) ! 470-490 m [0.547127, -0.327778, 2.00666, 2.67869], [0.427112, 8.56E-02, -1.61197, -1.17989], [6.23E-02, 0.760714, -0.659927, -2.30882], [-4.04E-02, -0.873328, -0.118326, 1.19626], [-4.85E-03, 0.130813, -0.169613, -0.181674], [4.82E-03, 0.289038, 7.34E-02, 6.45E-03], [-2.49E-02, -0.375342, 0.15139, 0.208253], [9.48E-04, 5.23E-02, -0.213227, 0.137941], [-9.18E-03, 3.91E-02, 7.26E-02, 4.73E-02], [-6.00E-05, 1.03E-02, 7.46E-03, 1.86E-02], [-2.21E-03, -9.70E-05, -7.13E-02, 4.29E-02], ], ]) dir_coefs = np.array([ [ # CASE ( 1 ) ! 70-90 m [5.07735, 96.4785, 18.8465, 110.986], [0.75209, -16.5103, -25.9592, 9.05636], [-1.50806, 1.69319, -7.7859, 13.3041], [1.11287, 3.711, 13.1084, -11.9491], [-0.987363, -2.93059, -4.75454, 9.04282], [0.65727, 0.560223, -0.541911, -5.33397], [-0.493572, -0.455574, 2.03972, 3.53745], [0.244207, 0.390402, 1.5338, -1.9793], [-1.26E-02, 0.19732, -2.70454, 0.179412], [9.13E-04, 9.65E-02, 0.304467, 4.79E-02], [-7.71E-02, -0.11096, 0.51028, 0.585717], ], [ # CASE ( 2 ) ! 90-110 m [2.98622, 87.1045, 41.7453, 124.301], [0.241282, -10.9238, -31.5696, 11.0764], [-0.380786, -1.71395, -8.35561, 3.68007], [0.287014, 6.76407, 17.1736, -7.4345], [-0.682991, -5.48805, -12.7947, 10.9313], [0.415999, 2.36938, 4.47285, -5.47595], [-0.184533, -7.04E-02, 0.81309, 1.06891], [0.152381, -0.344921, 3.40496, -1.81465], [-0.113556, -1.02575, -5.54619, 2.51668], [3.87E-02, 1.0794, 0.98668, -0.942351], [7.37E-02, -0.284347, 1.12315, -1.04163], ], [ # CASE ( 3 ) ! 
110-130 m [-10.8064, 63.1523, 18.7751, 255.252], [1.89875, -15.7662, -27.2545, -5.90699], [-1.81141, -7.58E-03, 4.49E-02, 19.4007], [-0.420216, 4.54261, 16.6642, -1.5632], [3.09E-02, 0.162346, -5.68196, 1.70168], [0.372585, -0.888944, -0.400871, -3.98736], [0.137532, -1.86E-02, -1.97659, -1.07897], [7.11E-02, 0.275322, 2.06716, -0.99703], [-0.142081, 0.690143, 1.74256, 0.963168], [-0.225792, -0.215169, 0.660299, 1.89319], [1.91E-02, -0.23, -1.69222, 0.190668], ], [ # CASE ( 4 ) ! 130-150 m [0.270461, 107.786, 140.705, 143.549], [2.46519, 25.9261, 54.6629, -43.2182], [-1.11746, -4.09287, -5.71316, 16.4144], [-0.104557, 2.88836, 14.657, -5.58632], [1.4104, -0.862421, 1.88282, -13.3856], [-0.994103, 6.07897, 6.16378, 6.53327], [0.440338, -7.14173, -12.2957, 0.653282], [-0.705677, 2.13336, 2.39331, 5.62277], [0.398742, -3.5049, -3.97854, -1.68531], [-7.72E-02, 2.14124, 3.42657, -0.982025], [0.120525, -1.80518, -3.44124, 0.391772], ], [ # CASE ( 5 ) ! 150-170 m [10.3894, 203.711, 87.9736, 0.818669], [4.15105, 37.734, 56.1061, -72.0928], [-1.60031, -6.42686, 2.99983, 21.7355], [0.162421, -22.7335, 4.23498, 0.433394], [-1.00817, -1.82237, -17.2291, 18.8346], [0.591051, 5.30019, 22.1782, -15.2786], [-0.350898, -1.35238, -14.9057, 9.09022], [0.512704, 5.33682, 12.0501, -11.3284], [-0.294613, -6.61282, -13.756, 9.48747], [0.180824, 6.67558, 8.1748, -6.39538], [-0.168678, -3.5973, -2.92266, 3.62255], ], [ # CASE ( 6 ) ! 170-190 m [-3.05838, 92.242, -6.17694, 218.678], [-1.19176, 10.9436, 5.33317, 23.6574], [0.396791, 5.36609, 14.86, -12.1807], [-0.260044, -3.3155, -1.83325, 3.07872], [0.147588, 3.54423, 2.61624, -2.87076], [-3.09E-02, -0.298005, -3.99378, 2.512], [3.52E-02, 0.476622, 0.917889, -1.19482], [-0.10397, -3.13393, -1.34654, 2.38467], [0.111959, 0.768005, 1.09164, -1.84864], [-5.32E-02, -0.753046, 0.517477, 0.77376], [2.36E-02, -0.255733, -0.765475, -0.183366], ], [ # CASE ( 7 ) ! 190-210 m [2.63747, 48.8574, -148.839, 198.635], [0.276349, 8.15568, 11.5466, 4.89475], [-0.161153, -3.92434, 15.2465, -2.75263], [-0.215546, -6.05707, -0.221136, 2.96778], [-0.174687, 0.722833, 2.58751, 1.43519], [-3.24E-03, 0.841219, 2.36677, -0.541046], [-0.14379, -0.422125, 6.03272, -3.55E-02], [4.94E-02, -0.165447, -1.64947, -0.118004], [6.88E-03, 0.618011, 0.600728, -0.312735], [-2.96E-02, -0.102388, -0.423526, 0.526055], [3.77E-03, -0.79762, -1.48591, 0.487559], ], [ # CASE ( 8 ) ! 210-230 m [1.25931, 81.7121, -72.2497, 192.288], [-0.421425, 0.812039, 26.4136, 12.7087], [-0.477334, -0.804493, 10.2938, 2.63738], [0.27025, -1.48414, 6.44E-02, -3.62925], [-0.206555, 2.60212, 4.78E-03, 1.41829], [0.199714, -0.145286, -1.43609, -1.0421], [-8.81E-02, -1.11826, 0.562309, 0.568182], [4.38E-02, -0.94946, -1.20199, 0.184361], [-5.13E-02, -0.157795, -0.596316, 0.747777], [5.03E-02, 6.23E-02, -0.821348, -0.411198], [-2.45E-02, 3.66E-03, 0.61934, 0.147334], ], [ # CASE ( 9 ) ! 230-250 m [4.99773, 45.439, -22.9981, 142.166], [1.34923, -0.690733, 1.11037, -7.00256], [-4.58E-02, -1.48399, 3.15438, -1.20619], [-5.86E-02, -0.324401, -0.520264, 0.827308], [6.67E-02, 1.95293, -1.46579, -1.66186], [2.23E-02, 1.10257, 1.61038, -0.14154], [4.83E-02, -0.46633, 0.318096, -1.22718], [-3.56E-02, -0.905797, -0.659337, 1.10221], [-6.54E-04, 0.514329, 0.38488, -0.221416], [2.40E-03, -0.307029, -0.455799, 0.167602], [5.79E-03, -0.3575, -6.82E-02, -1.79E-02], ], [ # CASE ( 10 ) ! 
250-270 m [2.87491, 81.7603, -14.221, 143.973], [0.176626, 0.711168, 14.3778, 3.41781], [-0.112353, -4.44334, 5.01439, -0.539061], [0.135496, 0.868787, -2.54952, -1.4882], [-5.87E-02, 7.34E-02, 0.618705, 0.341871], [4.36E-02, 1.16076, -2.2411, 0.371484], [-4.21E-03, -0.219162, 3.07613, -1.48294], [2.91E-02, -7.90E-02, -2.06058, 0.637811], [6.84E-04, 0.398542, -0.227958, -0.195655], [-1.33E-02, -0.148014, 0.112677, 0.28039], [4.56E-02, -0.4372, -1.05259, -0.39506], ], [ # CASE ( 11 ) ! 270-290 m [-3.74E-02, 5.72313, -25.8459, 204.708], [0.387587, 5.70337, 37.0722, -5.10619], [0.130067, 8.86213, 7.6219, -6.77984], [-1.83E-02, -4.80402, 1.26728, 1.1988], [-0.125984, 5.69111, -2.4798, 0.370193], [7.02E-02, -4.02809, 0.545202, 0.396538], [-4.89E-02, 1.99119, -7.47E-02, -0.617665], [7.28E-02, -1.94844, -0.9012, 0.174322], [-2.75E-02, 0.875895, 8.29E-02, 1.47E-02], [-4.90E-03, -0.26505, 0.684299, -0.101304], [-2.46E-03, -9.03E-02, -0.25124, 0.130552], ], [ # CASE ( 12 ) ! 290-310 m [4.48806, 101.681, -24.2152, 108.849], [1.12228, -11.8153, -5.83094, -3.59506], [0.152934, 0.610899, 10.1148, -6.59595], [6.76E-02, 1.44362, -8.36227, 1.70741], [-8.86E-02, 1.22016, 4.89384, -1.422], [1.14E-02, -0.801065, -4.6529, 2.29577], [-5.68E-03, -0.156515, 3.48364, -1.85745], [3.21E-02, 0.643855, -1.80571, 0.499593], [-5.96E-03, -0.645, 1.0105, -0.256849], [-1.79E-02, 0.137457, -7.45E-03, 0.232805], [-5.07E-04, -1.20E-03, -0.280138, 9.13E-02], ], [ # CASE ( 13 ) ! 310-330 m [0.253568, 43.3822, 42.3741, 166.917], [-0.210713, 14.3161, 12.187, 9.66539], [0.176871, -3.28688, -2.78059, -1.64384], [0.30952, 2.34743, -5.8261, -3.72051], [-0.211586, -1.38792, -0.891686, 3.26282], [0.114874, -1.0177, -2.95833, -0.285227], [-0.168163, 1.33608, 5.32715, 0.270668], [0.106821, 0.746965, -1.28128, -1.11127], [-2.17E-02, 0.198171, 0.911532, 2.31E-02], [-5.64E-03, 0.278658, 0.250055, -9.16E-02], [7.21E-03, 2.24E-02, 6.76E-02, -0.1011], ], [ # CASE ( 14 ) ! 330-350 m [1.4365, 104.113, 86.7884, 138.082], [1.01951, -22.4231, 8.14651, -3.0374], [-0.14238, 5.5217, -8.37098, 1.9052], [-8.04E-02, 2.56411, 8.01756, 0.450076], [7.34E-03, -3.31792, -10.0037, 1.66433], [-3.82E-02, 3.00083, 6.14358, -0.656165], [0.113861, -4.41267, -2.98194, -1.24882], [-0.154066, 4.29174, 3.74587, 1.4816], [0.127996, -2.88696, -2.49795, -1.24336], [-6.71E-02, 1.70388, 0.935254, 0.748082], [8.19E-03, -4.50E-02, -0.263839, -5.18E-02], ], [ # CASE ( 15 ) ! 350-370 m [-0.675054, 121.016, 0.173435, 199.751], [-0.52795, 26.7663, 36.6465, 8.14164], [0.686068, -2.58652, 1.37125, -12.8021], [-0.115391, -0.715049, 0.225913, 2.68255], [0.127924, 1.18619, -3.81934, -2.40047], [-0.201212, -1.51136, 4.51548, 3.23679], [0.175571, -0.664591, -5.74074, -2.24143], [-0.107098, 0.889236, 3.25149, 1.18349], [3.15E-02, -6.48E-02, -0.882842, -0.404645], [-9.69E-03, -0.486174, -0.284323, 0.336898], [1.04E-03, -0.144399, -6.10E-02, 6.62E-02], ], [ # CASE ( 16 ) ! 370-390 m [0.610558, -90.3161, -86.1311, 221.346], [-0.878196, 0.234356, -1.96802, 30.3835], [0.536954, 2.31986, 0.611791, -11.624], [-0.203843, -2.10521, -1.77538, 5.20693], [-6.04E-02, -1.53784, 0.391834, 1.09004], [-3.32E-02, 1.08307, 0.756223, 0.579045], [2.20E-03, 1.00851, 0.872176, -1.24302], [-4.70E-02, 0.313443, -5.20E-02, 1.24129], [0.105906, 2.60251, -0.805126, -2.35033], [-3.95E-02, -0.866726, 0.244709, 0.996069], [5.34E-02, 0.423689, -0.910358, -0.888237], ], [ # CASE ( 17 ) ! 
390-410 m [-0.256694, -53.0924, -28.899, 212.286], [0.368178, 0.200188, -15.1321, 9.40209], [-0.102825, -4.83546, 9.24228, -0.64019], [0.191961, 2.99238, -4.8869, -2.80575], [-9.33E-02, 0.237869, 3.72573, -8.03E-02], [1.70E-02, 2.22246, -0.874, 0.324301], [-4.39E-02, -1.22545, 1.03253, -7.41E-02], [9.07E-03, -0.438369, -1.85468, 0.746178], [-2.97E-02, -0.626331, 1.32958, 0.161941], [-4.73E-03, -0.639604, -0.50062, 0.398523], [7.78E-04, 0.203885, 0.111938, -9.66E-02], ], [ # CASE ( 18 ) ! 410-430 m [-1.05454, 19.3432, 14.3866, 209.914], [-5.37E-02, -6.69143, -5.48868, 13.8188], [0.130461, 1.84379, 10.2975, -6.85151], [0.120135, 3.25255, -4.64527, -0.957415], [-0.157071, -1.87681, 4.37492, 1.52585], [0.220174, 1.14707, -5.27774, -2.10403], [-0.185849, -8.73E-02, 4.5702, 1.45097], [5.77E-02, -0.265271, -2.17262, 1.19E-02], [-3.19E-02, 0.159054, 1.11463, 9.91E-02], [-9.31E-03, -0.514427, -0.486658, 0.472324], [5.84E-03, -6.98E-02, -6.53E-02, -7.68E-02], ], [ # CASE ( 19 ) ! 430-450 m [0.624689, 63.9533, -115.139, 203.718], [-0.249911, 8.56489, 12.0426, 11.2274], [0.208499, -2.38494, 8.76157, -7.17681], [-0.205812, 3.60713, 5.60652, 2.51439], [0.320606, -7.16713, -10.6408, -3.32927], [-0.178674, 5.15743, 3.70481, 2.92097], [0.101549, -5.22916, -1.89887, -1.64557], [-9.30E-02, 2.8729, 1.14221, 1.4604], [1.45E-02, -1.29998, -0.491218, -6.91E-02], [-6.95E-04, 0.830442, 1.25591, -0.451134], [-6.90E-04, 1.30E-02, -0.16423, 7.65E-02], ], [ # CASE ( 20 ) ! 450-470 m [4.30205, 83.823, -77.8869, 120.115], [0.11147, -2.13123, -13.0305, 11.4506], [5.36E-02, -9.82942, 3.21203, -2.14437], [3.12E-02, -0.694, -2.56494, 0.846492], [-3.97E-02, 0.628515, 0.898384, -0.403596], [0.187725, -1.32489, -3.10108, -1.64756], [-8.75E-02, -0.750003, 1.2358, 0.95118], [4.29E-02, 0.206995, -0.591777, -0.495133], [-3.25E-02, 0.187007, 0.351131, 0.374602], [-1.79E-02, -0.651232, -0.437205, 0.653204], [5.74E-03, 0.210108, -0.185616, -8.91E-02], ], [ # CASE ( 21 ) ! 470-490 m [0.685959, 76.5757, -26.8137, 187.31], [-0.229648, 3.36903, -12.3466, 19.5787], [5.56E-02, -6.33886, 2.64958, -2.35925], [-3.42E-02, -1.78314, 1.51304, 0.43034], [5.81E-02, 4.2818, -1.08668, -2.13185], [-1.94E-02, -2.76039, -0.573698, 1.97694], [1.26E-02, 0.932315, 0.974862, -1.5273], [1.04E-02, -0.143063, -0.728002, 0.464589], [1.21E-03, 0.262702, -0.133363, -0.236706], [-2.29E-04, -0.162697, -0.138587, 0.17236], [6.61E-03, -5.47E-02, -0.104054, -9.64E-02], ] ]) ``` #### File: pyts/specModels/nwtc.py ```python from .base import np, ts_float, specObj, specModelBase from ..misc import zL from .kelley_coefs import calc_nwtcup_coefs, p_coefs_unstable, f_coefs_unstable class genNWTC(specModelBase): """ An abstract base class for NWTC spectral models. """ def __call__(self, tsrun): """ Create and calculate the spectral object for a `tsrun` instance. Parameters ---------- tsrun : :class:`.tsrun` A TurbSim run object. Returns ------- out : :class:`.specObj` An NWTC spectral object for the grid in `tsrun`. """ out = specObj(tsrun) # !!!FIXTHIS: The following lines bind calculation to the # !!!MODEL. This goes against the PyTurbSim philosophy of # !!!keeping calculations separated from models. self.f = out.f self._work = np.zeros(out.n_f, dtype=ts_float) self.zhub = tsrun.grid.zhub # Fixing this will require something like changing: # out[comp][iz, iy] = model(z, u, comp) # to: # self.model(out, u, icomp, iz, iy) # Note here that u must be supplied explicitly because it is # not known to 'out'. 
# This would also require: # 1) deleting the self._work variable # 2) changing ``def L(self,)`` from a property to ``def # L(self, tsrun)`` and using tsrun.grid.zhub their. # 3) changing all calls to ``self.L`` accordingly. for iz in range(out.n_z): for iy in range(out.n_y): z = out.grid.z[iz] u = tsrun.prof.u[iz, iy] for comp in out.grid.comp: self._work[:] = 0.0 out[comp][iz, iy] = self.model(z, u, comp) return out class NWTC_stable(genNWTC): r"""The NWTC 'stable' spectral model. Parameters ---------- Ustar : float friction velocity [m/s]. zL : float The z/L stability parameter [non-dimensional] coef : array_like((3,2),dtype=float) spectral coefficients for this model. Notes ----- The specific form of this model is, .. math:: S_k(f) = \frac{U_{*}^2 A_k \hat{f}^{-1}\gamma}{1+B_k(f/\hat{f})^{5/3}} \qquad k=0,1,2\ (u,v,w) Where, :math:`\gamma=(\phi_E/\phi_M)^{2/3}` :math:`\hat{f}=\frac{\bar{u}\phi_M}{z}` :math:`\phi_E=(1+2.5 (z/L)^{0.6})^{1.5}` :math:`\phi_M=1+4.7*z/L` :math:`\bar{u}`, is the mean velocity at each grid-point, (taken from the profile model) :math:`A_k=\mathrm{scoef}[k,0] \mathrm{coef}[k,0]` :math:`B_k=\mathrm{scoef}[k,1] \mathrm{coef}[k,1]^{5/3}` See also -------- :attr:`s_coef` : These are the hard-wired :math:`\mathrm{scoef}` coefficients. """ s_coef = np.array([[79., 263.], [13., 32.], [3.5, 8.6]]) def __init__(self, Ustar, zL, coef=None): self.Ustar = Ustar self.Ustar2 = self.Ustar ** 2 self.zL = zL if coef is None: self.coefs = np.ones((3, 2), dtype=ts_float) else: self.coefs = coef def _sumfile_string(self, tsrun): sumstring_format = """ Turbulence model used = {dat.model_desc} Turbulence velocity (UStar) = {dat.Ustar:0.2f} [m/s] Stability parameter (z/L) = {dat.zL:0.2f} coefs u = [{p[0][0]:0.2f}, {p[0][1]:0.2f}] v = [{p[1][0]:0.2f}, {p[1][1]:0.2f}] w = [{p[2][0]:0.2f}, {p[2][1]:0.2f}] """ return sumstring_format.format(dat=self, p=self.coefs,) @property def _phie(self): return (1. + 2.5 * self.zL ** 0.6) ** 1.5 @property def _phim(self): return 1. + 4.7 * self.zL def model(self, z, u, comp): """ Calculate the spectral model for height `z`, velocity `u`, and velocity component `comp`. """ coef = self.coefs[comp] z_u = z / u denom = (self.f / self._phim) numer = (self._phie / self._phim) ** self.pow2_3 / self._phim * self.Ustar2 if coef.ndim > 1: self._work[:] = 0 for c in coef: self._work += self.model(z, u, comp) return self._work return (coef[0] * self.s_coef[comp, 0] * numer * z_u / (1. + self.s_coef[comp, 1] * (coef[1] * z_u * denom) ** self.pow5_3)) class NWTC_unstable(genNWTC): r"""The NWTC 'unstable' spectral model. .. math:: S_k(f) = U_\mathrm{star}^2 G_k(f,\bar{u},z,ZI) \qquad k = u, v, w Where :math:`G_k` is a function that depends on the frequency, :math:`f`, the mean velocity :math:`\bar{u}`, the height :math:`z`, and the mixing layer depth :math:`ZI`. The exact form of :math:`G_k` can be found in this class's 'model' method. Parameters ---------- Ustar : float The friction velocity (at the bottom boundary). zL : float The ratio of the HubHt to the Monin-Obhukov length. ZI : float The friction boundary layer height. p_coefs : array_like(3,2), optional Fit coefficients for this model. f_coefs : array_like(3,2), optional Fit coefficients for this model. 
""" def __init__(self, Ustar, zL, ZI, p_coefs=None, f_coefs=None): self.Ustar = Ustar self.Ustar2 = self.Ustar ** 2 self.zL = zL self.ZI = ZI if p_coefs is None: self.p_coefs = p_coefs_unstable if f_coefs is None: self.f_coefs = f_coefs_unstable def _sumfile_string(self, tsrun, ): sumstring_format = """ Turbulence model used = {dat.model_desc} Turbulence velocity (UStar) = {dat.Ustar:0.4g} [m/s] Mixing layer depth (ZI) = {dat.ZI:0.4g} [m] Stability parameter (z/L) = {dat.zL:0.4g} Monin-Obhukov Length scale = {Lmo:0.4g} [m] p_coefs u = [{p[0][0]:0.4g}, {p[0][1]:0.4g}] v = [{p[1][0]:0.4g}, {p[1][1]:0.4g}] w = [{p[2][0]:0.4g}, {p[2][1]:0.4g}] f_coefs u = [{f[0][0]:0.4g}, {f[0][1]:0.4g}] v = [{f[1][0]:0.4g}, {f[1][1]:0.4g}] w = [{f[2][0]:0.4g}, {f[2][1]:0.4g}] """ return sumstring_format.format(dat=self, Lmo=self.L, p=self.p_coefs, f=self.f_coefs,) @property def L(self, ): if not hasattr(self, 'zhub'): raise Exception("The Monin-Obhukov is unknown until this " "model has been '__call__'d.") return self.zhub / self.zL def model(self, z, u, comp): r""" Computes the spectrum for this 'unstable' spectral model. Parameters ---------- z : array_like (nz) Height above the surface [m]. u : array_like (nz,ny) Mean velocity [m/s]. comp : int {0,1,2} Index (u,v,w) of the spectrum to compute. Returns ------- spec : :class:`.specObj` The spectral object which contains the 'array' (property) of spectra at each point in the grid. Notes ----- The form of the u-compenent spectrum is: .. math:: S_u(f) = U_\mathrm{star}^2 \left( p_{u,1}\frac{\alpha}{1+( F_{u,1} \hat{f})^{5/3}} + p_{u,2}\frac{\beta}{( \delta_u + F_{u,2} f' )^{5/3} } \right ) The form of the v-compenent spectrum is .. math:: S_v(f) = U_\mathrm{star}^2 \left( p_{v,1}\frac{\alpha}{(1 + F_{v,1} \hat{f})^{5/3}} + p_{v,2}\frac{\beta}{( \delta_v + F_{v,2} f' )^{5/3} } \right ) The form of the w-compenent spectrum is: .. math:: S_w(f) = U_\mathrm{star}^2 \left( p_{w,1}\frac{\alpha}{(1 + F_{w,1} \hat{f})^{5/3}} \gamma + p_{w,2}\frac{\beta}{ 1 + F_{w,2} {f'} ^{5/3} } \right ) Where, :math:`\hat{f} = f ZI/\bar{u}` :math:`f' = f z/ \bar{u}` :math:`\alpha = ZI^{5/3}/(-\bar{u}L^{2/3})` :math:`\beta = z(1-z/ZI)^2/\bar{u}` :math:`\delta_u = 1+15 z/ZI` :math:`\delta_v = 1+2.8 z/ZI` :math:`\gamma = \frac{f'^2+0.09(z/ZI)^2}{f'^2+0.0225}` """ p_coef = self.p_coefs[comp] f_coef = self.f_coefs[comp] pow5_3 = self.pow5_3 z_ZI = z / self.ZI num0 = self.Ustar2 * self.ZI / u * (self.ZI / -self.L()) ** self.pow2_3 fZI_u = self.f * self.ZI / u z_u = z / u num1 = self.Ustar2 * z_u * (1 - z_ZI) ** 2 fz_u = self.f * z_u if comp == 0: tmp0 = 1 + 15 * z_ZI self._work = (p_coef[0] * num0 / (1 + (fZI_u * f_coef[0]) ** pow5_3) + p_coef[1] * num1 / (tmp0 + f_coef[1] * fz_u) ** pow5_3) elif comp == 1: tmp0 = 1 + 2.8 * z_ZI self._work = (p_coef[0] * num0 / (1 + f_coef[0] * fZI_u) ** pow5_3 + p_coef[1] * num1 / (tmp0 + f_coef[1] * fz_u) ** pow5_3) ## # Handle extra (e.g. wake, for outf_turb) coefficients: ## if coef.shape[0]>2 and not np.isnan(coef[2,0]+coef[2,1]): # self._work+=coef[2,0]*17*num1/(tmp0+coef[2,1]*9.5*fz_u)**pow5_3 else: self._work = (p_coef[0] * num0 / (1 + f_coef[0] * fZI_u) ** pow5_3 * np.sqrt((fz_u ** 2 + (0.3 * z_ZI) ** 2) / (fz_u ** 2 + 0.0225)) + p_coef[1] * num1 / (1 + f_coef[1] * fz_u ** pow5_3)) return self._work def smooth(Ustar, Ri, ZI=None): """ Compute the 'smooth' spectral model. Parameters ---------- Ustar : float The bottom-boundary friction velocity [m/s]. Ri : float The Richardson number stability parameter. ZI : float, optional mixing layer depth [m]. 
Only needed for Ri<0. Returns ------- specModel : :class:`NWTC_stable` (Ri>0), or :class:`NWTC_unstable` (Ri<0) """ zl_ = zL(Ri, 'smooth') if zl_ >= 0: out = NWTC_stable(Ustar, zl_) else: out = NWTC_unstable(Ustar, zl_, ZI) return out def nwtcup(Ustar, Ri, ZI=None): """ Compute the 'nwtcup' spectral model. Parameters ---------- Ustar : float The bottom-boundary friction velocity [m/s]. Ri : float The Richardson number stability parameter. ZI : float, optional mixing layer depth [m]. Only needed for Ri<0. Returns ------- specModel : :class:`NWTC_stable` (Ri>0), or :class:`NWTC_unstable` (Ri<0) """ zl_ = zL(Ri, 'nwtcup') coefs = calc_nwtcup_coefs(zl_) if zl_ >= 0: out = NWTC_stable(Ustar, zl_, coefs) else: out = NWTC_unstable(Ustar, zl_, ZI, coefs) return out ``` #### File: pyts/stressModels/main.py ```python from .base import stressModelBase, stressObj class uniform(stressModelBase): """Uniform Reynold's stress model. In this model each component of the Reynold's stress can be specified explicitly, but the values are uniform in space. Parameters ---------- upvp_ : float The u'v' component of Reynold's stress. upwp_ : float The u'w' component of Reynold's stress. vpwp_ : float The v'w' component of Reynold's stress. """ def __init__(self, upvp_=0.0, upwp_=0.0, vpwp_=0.0): """ Set the Reynold's stresses to be uniform over the rotor disk. """ self.vals = [upvp_, upwp_, vpwp_] def _sumfile_string(self, tsrun, ): sumstring_format = """ Stress model used = {dat.model_desc} u'v' = {dat.vals[0]:0.4g} [m^2/s^2] u'w' = {dat.vals[1]:0.4g} [m^2/s^2] v'w' = {dat.vals[2]:0.4g} [m^2/s^2] """ return sumstring_format.format(dat=self, ) def __call__(self, tsrun): """ Create and calculate the stress object for a `tsrun` instance. Parameters ---------- tsrun : :class:`.tsrun` A TurbSim run object. Returns ------- out : :class:`.stressObj` A stress object for the grid in `tsrun`. """ out = stressObj(tsrun) out.upvp_[:] = self.vals[0] out.upwp_[:] = self.vals[1] out.vpwp_[:] = self.vals[2] return out ```
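The `NWTC_stable.model` method in `pyts/specModels/nwtc.py` above evaluates the documented stable-spectrum equation point by point on the TurbSim grid. As a quick standalone illustration of that same formula, the sketch below re-implements it with plain NumPy: it copies the hard-wired `s_coef` values and the phi_E/phi_M expressions from the class, does not call the PyTurbSim API, and every input value is an invented example number.

```python
import numpy as np

# Hard-wired component coefficients from NWTC_stable.s_coef (rows: u, v, w).
S_COEF = np.array([[79., 263.], [13., 32.], [3.5, 8.6]])


def stable_spectrum(f, z, u, ustar, zL, comp, coef=(1.0, 1.0)):
    """Evaluate the documented NWTC 'stable' spectrum at one grid point.

    f     : array of frequencies [Hz]
    z     : height above ground [m]
    u     : mean wind speed at that height [m/s]
    ustar : friction velocity [m/s]
    zL    : z/L stability parameter
    comp  : 0, 1 or 2 for the u, v, w component
    coef  : per-component (A_k, B_k) fit coefficients (default 1, as in the class)
    """
    phi_e = (1. + 2.5 * zL ** 0.6) ** 1.5
    phi_m = 1. + 4.7 * zL
    numer = (phi_e / phi_m) ** (2. / 3.) / phi_m * ustar ** 2
    z_u = z / u
    denom = f / phi_m
    a_k, b_k = coef
    return (a_k * S_COEF[comp, 0] * numer * z_u /
            (1. + S_COEF[comp, 1] * (b_k * z_u * denom) ** (5. / 3.)))


# Invented example inputs, purely for illustration:
freqs = np.arange(0.01, 2.0, 0.01)
spec_u = stable_spectrum(freqs, z=80.0, u=8.0, ustar=0.5, zL=0.1, comp=0)
```

Plotting `spec_u` against `freqs` on log axes shows the characteristic -5/3 roll-off that the class docstring describes.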
{ "source": "jensguballa/andyBee", "score": 2 }
#### File: andyBee/app/gpx.py ```python from lxml import etree from app import geocache_db from geocache_model_sql import Cache, Cacher, CacheType, CacheContainer, CacheCountry, CacheState, CacheToAttribute, Waypoint, WaypointSym, WaypointType, Log, LogType, Attribute, UserNote from geocache import Geocache from db import DbInterface import re import datetime import time import calendar from dateutil.parser import parse GPX_NS = "http://www.topografix.com/GPX/1/0" GPX = "{%s}" % GPX_NS GS_NS = "http://www.groundspeak.com/cache/1/0/1" GS = "{%s}" % GS_NS XSI_NS = "http://www.w3.org/2001/XMLSchema-instance" XSI = "{%s}" % XSI_NS latmin = 0 latmax = 0 lonmin = 0 lonmax = 0 deleted_wpt = {} log_pool = {} cacher_pool = None def coords_to_string(coord, str1, str2): string = str1 if coord < 0: coord = -coord string = str2 degrees = int(coord) string += ' ' + str(degrees) + ' ' + '%.3f' % ((coord - degrees) * 60) return string def wpt_to_xml(parent, waypoint, geocache, data): w_wpt = None lat = waypoint['lat'] lon = waypoint['lon'] if waypoint['name'] == waypoint['gc_code']: if geocache['coords_updated']: lat = geocache['corr_lat'] lon = geocache['corr_lon'] data['latmin'] = min(data['latmin'], lat) data['latmax'] = max(data['latmax'], lat) data['lonmin'] = min(data['lonmin'], lon) data['lonmax'] = max(data['lonmax'], lon) w_wpt = subnode(parent, GPX+"wpt", attrib={'lat': str(lat), 'lon': str(lon)}) subnode(w_wpt, GPX+"time", text=waypoint['time']) subnode(w_wpt, GPX+"name", text=waypoint['name']) subnode(w_wpt, GPX+"cmt", text=waypoint['cmt']) subnode(w_wpt, GPX+"desc", text=waypoint['descr']) subnode(w_wpt, GPX+"url", text=waypoint['url']) subnode(w_wpt, GPX+"urlname", text=waypoint['urlname']) subnode(w_wpt, GPX+"sym", text=waypoint['sym']) subnode(w_wpt, GPX+"type", text=waypoint['type']) return w_wpt def geocache_to_xml(parent, geocache, data): wpt_node = None print "DB01", geocache['waypoints'] for waypoint in geocache['waypoints']: if waypoint['name'] == waypoint['gc_code']: wpt_node = wpt_to_xml(parent, waypoint, geocache, data) cache_node = subnode(wpt_node, GS+"cache", nsmap={'groundspeak':GS_NS}, attrib={ 'id': str(geocache['id']), 'available': "True" if geocache['available'] else "False", 'archived': "True" if geocache['archived'] else "False"}) subnode(cache_node, GS+"name", text=geocache['name']) subnode(cache_node, GS+"placed_by", text=geocache['placed_by']) subnode(cache_node, GS+"owner", text=geocache['owner'], attrib={'id': str(geocache['owner_id'])}) subnode(cache_node, GS+"type", text=geocache['type']) subnode(cache_node, GS+"container", text=geocache['container']) if len(geocache['attributes']): attr_node = subnode(cache_node, GS+"attributes") for attribute in geocache['attributes']: subnode(attr_node, GS+"attribute", text=attribute['name'], attrib={ 'id': str(attribute['gc_id']), 'inc': "1" if attribute['inc'] else "0"}) subnode(cache_node, GS+"difficulty", text=re.sub('\.0','', str(geocache['difficulty']))) subnode(cache_node, GS+"terrain", text=re.sub('\.0','',str(geocache['terrain']))) subnode(cache_node, GS+"country", text=geocache['country']) subnode(cache_node, GS+"state", text=geocache['state']) subnode(cache_node, GS+"short_description", text=geocache['short_desc'], attrib={'html': "True" if geocache['short_html'] else "False"}) orig_coords_txt = '' if geocache['coords_updated']: orig_coords_txt = 'Original coordinates: ' + coords_to_string(geocache['lat'], 'N', 'S') + ' ' + coords_to_string(geocache['lon'], 'E', 'W') if geocache['long_html']: orig_coords_txt = '<p>' 
+ orig_coords_txt + '</p>' user_note = '' if geocache['note_present']: note = geocache_db.get_by_id(UserNote, geocache['id']) user_note = note['note'] if geocache['long_html']: user_note = '<div>' + user_note.replace("\n", "<br />") + '</div>' subnode(cache_node, GS+"long_description", text=geocache['long_desc'] + orig_coords_txt + user_note, attrib={'html': "True" if geocache['long_html'] else "False"}) subnode(cache_node, GS+"encoded_hints", text=geocache['encoded_hints']) if len(geocache['logs']) and (data['max_logs'] > 0): sort_logs = sorted(geocache['logs'], key=lambda log: log['date']) logs_node = subnode(cache_node, GS+"logs") for log in sort_logs[0:data['max_logs']]: log_node = subnode(logs_node, GS+"log", attrib={'id': str(log['id'])}) subnode(log_node, GS+"date", text=log['date']) subnode(log_node, GS+"type", text=log['type']) subnode(log_node, GS+"finder", text=log['finder'], attrib={'id': str(log['finder_id'])}) subnode(log_node, GS+"text", text=log['text'], attrib={'encoded': 'True' if log['text_encoded'] else 'False'}) if data['waypoints']: for waypoint in geocache['waypoints']: if waypoint['name'] == waypoint['gc_code']: wpt_to_xml(parent, waypoint, geocache, data) def subnode(parent, tag_name, text=None, attrib=None, nsmap=None): node = etree.SubElement(parent, tag_name, nsmap=nsmap) if text is not None: node.text = text if attrib is not None: for name, val in attrib.iteritems(): node.attrib[name] = val return node def export_gpx(data): data['latmin'] = 1000.0 data['latmax'] = -1000.0 data['lonmin'] = 1000.0 data['lonmax'] = -1000.0 root = etree.Element(GPX+"gpx", nsmap={None:GPX_NS, "xsi":XSI_NS}) root.attrib["version"] = "1.0" root.attrib["creator"] = "geodb, all rights reserved" root.attrib[XSI+"schemaLocation"] = "{} {}/gpx.xsd {} {}/cache.xsd".format(GPX_NS,GPX_NS,GS_NS,GS_NS) subnode(root, GPX+"name" , text="Cache Listing Generated by andyBee") subnode(root, GPX+"desc" , text="This is an individual list of geocaches generated by andyBee.") subnode(root, GPX+"author" , text="Hi, it's me: <NAME>") subnode(root, GPX+"email" , text="<EMAIL>") subnode(root, GPX+"url" , text="http://www.guballa.de") subnode(root, GPX+"urlname", text="Geocaching. 
What else?") subnode(root, GPX+"time" , text=datetime.datetime.now().isoformat()) subnode(root, GPX+"keyword", text="cache, geocache") bounds = subnode(root, GPX+"bounds") for id in data['list']: geocache = Geocache(id, geocache_db).fetch_singular() geocache_to_xml(root, geocache.get_data(), data) bounds.attrib['minlat'] = str(data['latmin']) bounds.attrib['minlon'] = str(data['lonmin']) bounds.attrib['maxlat'] = str(data['latmax']) bounds.attrib['maxlon'] = str(data['lonmax']) et = etree.ElementTree(root) return etree.tostring(et, pretty_print=True, encoding="UTF-8", xml_declaration=True) class GpxImporter(): def __init__(self, geocache_db, max_logs, pref_owner): self.waypoint_itf = DbInterface(geocache_db, Waypoint) self.waypoint_sym_itf = DbInterface(geocache_db, WaypointSym) self.waypoint_type_itf = DbInterface(geocache_db, WaypointType) self.cache_itf = DbInterface(geocache_db, Cache) self.cache_type_itf = DbInterface(geocache_db, CacheType) self.cache_state_itf = DbInterface(geocache_db, CacheState) self.cache_country_itf = DbInterface(geocache_db, CacheCountry) self.cache_container_itf = DbInterface(geocache_db, CacheContainer) self.cache_to_attribute_itf = DbInterface(geocache_db, CacheToAttribute) self.cacher_itf = CacherInterface(geocache_db, Cacher) self.log_type_itf = DbInterface(geocache_db, LogType) self.log_itf = LogInterface(geocache_db, Log) self.db = geocache_db self.deleted_wpt = {} self.max_logs = max_logs self.pref_owner = pref_owner self.last_updated = 0 def import_gpx(self, gpx_file): try: start = time.time() tree = etree.parse(gpx_file) end = time.time() except: return gpx = tree.getroot() if gpx.tag == GPX+"gpx": # First, parse all the common elements for node in gpx: if node.tag == GPX+"time": self.last_updated = calendar.timegm(parse(node.text).utctimetuple()) break # Second, parse all waypoints for node in gpx: if node.tag == GPX+"wpt": wpt = self._parse_wpt(node) self._merge_wpt(wpt) geocache_db.execute('''UPDATE waypoint SET cache_id = (SELECT cache.id FROM cache WHERE cache.gc_code = waypoint.gc_code) WHERE cache_id IS NULL''') self.db.commit() def _parse_wpt(self, node): wpt = Waypoint() wpt.cache = None wpt.db['lat'] = float(node.get("lat")) wpt.db['lon'] = float(node.get("lon")) for child in node: if child.tag == GPX+"time": wpt.db['time'] = child.text elif child.tag == GPX+"name": wpt.db['name'] = child.text wpt.db['gc_code'] = re.sub('^..', 'GC', child.text) elif child.tag == GPX+"desc": wpt.db['descr'] = child.text elif child.tag == GPX+"url": wpt.db['url'] = child.text elif child.tag == GPX+"urlname": wpt.db['urlname'] = child.text elif child.tag == GPX+"sym": wpt.sym = child.text wpt.db['sym_id'] = self.waypoint_sym_itf.create_singleton_value('name', child.text) elif child.tag == GPX+"type": #wpt.db['type_id'] = geocache_db.create_singleton_id(WaypointType, {'name': child.text}) wpt.db['type_id'] = self.waypoint_type_itf.create_singleton_value('name', child.text) elif child.tag == GPX+"cmt": wpt.db['cmt'] = child.text elif child.tag == GS+"cache": wpt.cache = self._parse_cache(child) wpt.db['cache_id'] = wpt.cache.db['id'] if wpt.cache is not None: # copy some values from the waypoint, so that join statements # can be avoided wpt.cache.db['hidden'] = wpt.db['time'] wpt.cache.db['lat'] = wpt.db['lat'] wpt.cache.db['lon'] = wpt.db['lon'] wpt.cache.db['gc_code'] = wpt.db['name'] wpt.cache.db['url'] = wpt.db['url'] wpt.cache.db['found'] = (wpt.sym == 'Geocache Found') return wpt def _parse_cache(self, node): cache = Cache() cache.db['last_updated'] = 
self.last_updated cache.db['id'] = int(node.get("id")) cache.db['available'] = (node.get("available") == "True") cache.db['archived'] = (node.get("archived") == "True") for child in node: if child.tag == GS+"name": cache.db['name'] = child.text elif child.tag == GS+"placed_by": cache.db['placed_by'] = child.text elif child.tag == GS+"owner": owner_id = int(child.get("id")) self.cacher_itf.create_singleton(owner_id, child.text) cache.db['owner_id'] = owner_id # geocache_db.create_singleton_id(Cacher, {'id': child.get("id") , 'name': child.text}) elif child.tag == GS+"type": #cache.db['type_id'] = geocache_db.create_singleton_id(CacheType, {'name': child.text}) cache.db['type_id'] = self.cache_type_itf.create_singleton_value('name', child.text) elif child.tag == GS+"container": #cache.db['container_id'] = geocache_db.create_singleton_id(CacheContainer, {'name': child.text}) cache.db['container_id'] = self.cache_container_itf.create_singleton_value('name', child.text) elif child.tag == GS+"difficulty": cache.db['difficulty'] = float(child.text) elif child.tag == GS+"terrain": cache.db['terrain'] = float(child.text) elif child.tag == GS+"country": #cache.db['country_id'] = geocache_db.create_singleton_id(CacheCountry, {'name': child.text}) cache.db['country_id'] = self.cache_country_itf.create_singleton_value('name', child.text) elif child.tag == GS+"state": #cache.db['state_id'] = geocache_db.create_singleton_id(CacheState, {'name': child.text}) cache.db['state_id'] = self.cache_state_itf.create_singleton_value('name', child.text) elif child.tag == GS+"short_description": cache.db['short_desc'] = child.text cache.db['short_html'] = (child.get("html") == "True") elif child.tag == GS+"long_description": cache.db['long_desc'] = child.text cache.db['long_html'] = (child.get("html") == "True") elif child.tag == GS+"encoded_hints": cache.db['encoded_hints'] = child.text elif child.tag == GS+"attributes": cache.attributes = [] for node_attr in child: if node_attr.tag == GS+"attribute": cache.attributes.append(self._parse_attribute(node_attr)) elif child.tag == GS+"logs": cache.logs = [] for node_log in child: if node_log.tag == GS+"log": cache.logs.append(self._parse_log(node_log, cache.db['id'])) return cache def _parse_attribute(self, node): attr = Attribute() attr.db['gc_id'] = int(node.get("id")) attr.db['inc'] = (node.get("inc") == "1") attr.db['name'] = node.text return attr def _parse_log(self, node, cache_id): log = Log() log.db['id'] = int(node.get("id")) log.db['cache_id'] = cache_id for log_node in node: if log_node.tag == GS+"date": log.db['date'] = log_node.text elif log_node.tag == GS+"type": #log.db['type_id'] = geocache_db.create_singleton_id(LogType, {'name': log_node.text}) log.db['type_id'] = self.log_type_itf.create_singleton_value('name', log_node.text) elif log_node.tag == GS+"finder": log.db['finder_id'] = int(log_node.get("id")) log.finder = log_node.text elif log_node.tag == GS+"text": log.db['text'] = log_node.text log.db['text_encoded'] = (log_node.get("encoded") == "True") elif log_node.tag == GS+"log_wpt": log.db['lat'] = float(log_node.get("lat")) log.db['lon'] = float(log_node.get("lon")) return log def _merge_wpt(self, wpt): gc_code = wpt.db['gc_code'] #cache_exists = geocache_db.get_singleton_id(Cache, {'gc_code': gc_code}) != None cache_exists = self.cache_itf.get_id('gc_code', gc_code) != None if cache_exists: if gc_code == wpt.db['name']: # waypoint for the cache itself geocache_db.execute('DELETE FROM waypoint WHERE gc_code = ? 
AND name = ?', (gc_code, gc_code)) else: # additional waypoint if gc_code not in self.deleted_wpt: geocache_db.execute('DELETE FROM waypoint WHERE gc_code = ? AND name != ?', (gc_code, gc_code)) self.deleted_wpt[gc_code] = True self.waypoint_itf.insert(wpt.db) if wpt.cache is not None: self._merge_cache(wpt.cache, cache_exists) def _merge_cache(self, cache, cache_exists): last_logs = self._merge_logs(cache.logs, cache.db['id']) cache.db['last_logs'] = last_logs if cache_exists: self.cache_itf.update(cache.db['id'], cache.db) else: self.cache_itf.insert(cache.db) self._merge_attributes(cache.attributes, cache.db['id'], cache_exists) def _merge_logs(self, logs, cache_id): db_logs = self.log_itf.get_cache_logs(cache_id) merged_array = [] for log in logs: if log.db['id'] in db_logs: del db_logs[log.db['id']] merged_array.append({'id': log.db['id'], 'date': log.db['date'], 'finder': log.finder, 'type_id': log.db['type_id'], 'action': 'update', 'db': log.db}) else: merged_array.append({'id': log.db['id'], 'date': log.db['date'], 'finder': log.finder, 'type_id': log.db['type_id'], 'action': 'insert', 'db': log.db}) for log in db_logs.values(): merged_array.append(log) sorted_logs = sorted(merged_array, key=lambda log: log['date'], reverse=True) log_cntr = 1 for log in sorted_logs: if (log_cntr <= self.max_logs) or (log['finder'] == self.pref_owner): if log['action'] == 'insert': self.cacher_itf.create_singleton(log['db']['finder_id'], log['finder']) self.log_itf.insert(log['db']) elif log['action'] == 'update': self.cacher_itf.create_singleton(log['db']['finder_id'], log['finder']) self.log_itf.update(log['id'], log['db']) else: self.log_itf.delete('id', log['id']) log_cntr = log_cntr + 1 last_logs = ';'.join([self.log_type_itf.get_value(log['type_id'], 'name') for log in sorted_logs[:5]]) return last_logs def _merge_attributes(self, attributes, cache_id, cache_exists): if cache_exists: self.cache_to_attribute_itf.delete('cache_id', cache_id) for attr in attributes: id = geocache_db.create_singleton_id(Attribute, attr.db) self.cache_to_attribute_itf.insert({'cache_id': cache_id, 'attribute_id': id}) class AttributeInterface(): def __init__(self, db, cls): DbInterface.__init__(self, db, cls) def get_id(self, columns): if self._reverse_lookup_table is None: for row in self.execute('SELECT id, gc_id, inc, name FROM attribute'): vals = '|'.join([row['gc_id'], row['inc'], row['name']]) self._reverse_lookup_table[vals] = id vals = '|'.join([columns['gc_id'], columns['inc'], columns['name']]) if vals in self._reverse_lookup_table: return self._reverse_lookup_table[vals] else: return None def create_singleton(self, columns): id = self.get_id(columns) if id is not None: return id self.insert(columns) id = self.db.cursor.lastrowid vals = '|'.join([columns['gc_id'], columns['inc'], columns['name']]) self._reverse_lookup_table[vals] = id if self._lookup_table is not None: self._lookup_table[id] = value return id class CacherInterface(DbInterface): def __init__(self, db, cls): DbInterface.__init__(self, db, cls) def create_singleton(self, id, name): db_name = self.get_value(id, 'name') if db_name is None: self.insert({'id': id, 'name': name}) self._lookup_table[id] = name if self._reverse_lookup_table is not None: self._reverse_lookup_table[name] = id else: if name != db_name: self.update(id, {'name': name}) self._lookup_table[id] = name if self._reverse_lookup_table is not None: self._reverse_lookup_table[name] = id class LogInterface(DbInterface): def __init__(self, db, cls): DbInterface.__init__(self, db, 
cls) self._pool = {} for row in self.execute('SELECT id, date, cache_id, finder_id, type_id FROM log'): if row['cache_id'] not in self._pool: self._pool[row['cache_id']] = {} self._pool[row['cache_id']][row['id']] = {'id': row['id'], 'date': row['date'], 'finder_id': row['finder_id'], 'type_id': row['type_id'], 'action': 'none'} def get_cache_logs(self, cache_id): if cache_id in self._pool: return self._pool[cache_id] else: return {} ```
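Both the exporter and `GpxImporter` above lean on namespace-qualified tag names (the `GPX` and `GS` prefixes) and the small `subnode` helper. The snippet below is a stripped-down, self-contained illustration of that pattern with plain `lxml`; it reuses only the namespace constant, the waypoint values are made up, and it is not part of the andyBee application code.

```python
from lxml import etree

GPX_NS = "http://www.topografix.com/GPX/1/0"
GPX = "{%s}" % GPX_NS


def subnode(parent, tag, text=None, attrib=None):
    # Same idea as the helper above: create the child, fill it, return it.
    node = etree.SubElement(parent, tag)
    if text is not None:
        node.text = text
    if attrib:
        for name, val in attrib.items():
            node.attrib[name] = val
    return node


root = etree.Element(GPX + "gpx", nsmap={None: GPX_NS})
wpt = subnode(root, GPX + "wpt", attrib={"lat": "51.7189", "lon": "8.7575"})
subnode(wpt, GPX + "name", text="GC12345")
subnode(wpt, GPX + "sym", text="Geocache")

print(etree.tostring(root, pretty_print=True, encoding="unicode"))
```

Running this prints a namespaced `<gpx><wpt>` fragment, which is essentially what `export_gpx` assembles for every cache before serializing the whole tree.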
{ "source": "jensh007/gardenlinux", "score": 2 }
#### File: gardenlinux/ci/render_release_from_rc.py ```python import argparse import dataclasses import paths from glci.model import BuildType from glci.model import BuildTarget import render_pipeline_run as renderer import yaml def main(): parser = argparse.ArgumentParser() parser.add_argument( '--committish', help='commit of release canditate' ) parser.add_argument( '--branch', default='main', help='branch to build rc from (default: main)' ) parser.add_argument( '--gardenlinux-epoch', help='the gardenlinux epoch of the release-candidate', ) parser.add_argument( '--git-url', help='the git repository of the release-candidate', default='https://github.com/gardenlinux/gardenlinux.git' ) parser.add_argument( '--outfile', default='release-from-rc-run.yaml', help='name of rendered pipeline-run yaml', ) parser.add_argument('--disable-notifications', action='store_const', const=True, default=False) parser.add_argument('--additional-recipients', default=' ') parser.add_argument('--only-recipients', default=' ') build_targets = ( BuildTarget.BUILD, BuildTarget.MANIFEST, BuildTarget.COMPONENT_DESCRIPTOR, BuildTarget.FREEZE_VERSION, BuildTarget.GITHUB_RELEASE, ) parsed = parser.parse_args() setattr(parsed, 'cicd_cfg', 'default') setattr(parsed, 'pipeline_cfg', paths.flavour_cfg_path) setattr(parsed, 'oci_path', 'eu.gcr.io/gardener-project/gardenlinux') setattr(parsed, 'flavour_set', 'all') setattr(parsed, 'build_targets', build_targets) setattr(parsed, 'version',f'{parsed.gardenlinux_epoch}.0') setattr(parsed, 'pytest_cfg','default') setattr(parsed, 'promote_target', BuildType.RELEASE) setattr(parsed, 'gardenlinux_base_image', None) print(f'{parsed=}') pipeline_run = renderer.mk_pipeline_main_run( args=parsed, ) pipeline_run_dict = dataclasses.asdict(pipeline_run) with open(parsed.outfile, 'w') as f: yaml.safe_dump(pipeline_run_dict, f) print(f'pipeline-run written to {parsed.outfile}') if __name__ == '__main__': main() ``` #### File: tests/integration/util.py ```python import subprocess import logging import json import urllib.request import paramiko from googleapiclient.errors import HttpError logger = logging.getLogger(__name__) def get_my_ip(): """Obtain external visible IP address""" url='https://api.myip.com' response = urllib.request.urlopen(url) if response.status != 200: raise Exception(f'Unable to obtain this hosts public IP, got HTTP status {response.status} from {url}') doc = json.load(response) return doc['ip'] def get_public_key(private_key_file): k = paramiko.RSAKey.from_private_key_file(private_key_file) return k.get_name() + " " + k.get_base64() # gcp related def delete_firewall_rule(compute, project, name): try: request = compute.firewalls().delete(project=project, firewall=name) response = request.execute() logger.info(response) op_name = response['name'] logger.info(f'waiting for delete filewall rule {op_name=}') wait_for_global_operation(compute, project, op_name) except HttpError as h: if h.resp.status != 404: raise def ensure_firewall_rules(compute, project, restfw): name = restfw["name"] delete_firewall_rule(compute, project, name) request = compute.firewalls().insert(project=project, body=restfw) response = request.execute() logger.info(response) op_name = response['name'] logger.info(f'waiting for create filewall rule {op_name=}') wait_for_global_operation(compute, project, op_name) def wait_for_global_operation(compute, project, operation): response = compute.globalOperations().wait(project=project, operation=operation,).execute() if response["status"] != "DONE": 
logger.error("Operation failed %s" % json.dumps(response, indent=4)) error = "" if "error" in response: error = response["error"] raise Exception("Operation %s failed: %s" % (operation, error)) def get_config_value(config, key): if key in config and config[key] != "": return config[key] else: return None ```
{ "source": "JensHeinrich/funktrainer", "score": 3 }
#### File: funktrainer/data/parse_xml_to_sql_dump.py ```python import sys import os import re import xml.etree.ElementTree as ET global_category_id_seq = 0 global_question_id_seq = 0 global_topic_id_seq = 0 global_category_to_topic_id_seq = 0 global_question_to_category_id_seq = 0 global_answer_id_seq = 0 global_question_to_lichtblick_id_seq = 0 global_question_to_topic_id_seq = 0 global_exam_settings_id_seq = 0 topics = [] categories = [] questions = [] answers = [] question_to_category = [] question_to_topic = [] category_to_topic = [] question_to_lichtblick = [] exam_settings = [] mixtopics = {u"Klasse E alle (Technik, Betrieb, Vorschriften)": [u"Technische Kenntnisse (Klasse E)", u"Betriebliche Kenntnisse", u"Kenntnisse von Vorschriften"], } # [ nQuestions, nRequired, nSeconds ] examsettings = { u"Technische Kenntnisse (Klasse E)": [34, 34 * .73, 60 * 60], u"Technische Kenntnisse (Klasse A)": [51, 51 * .73, 60 * 60], u"Betriebliche Kenntnisse": [34, 34 * .73, 60 * 60], u"Kenntnisse von Vorschriften": [34, 34 * .73, 60 * 60], # TODO: sampling here is definitely wrong, as we uniformly sample # from all categories, but should sample 34 each from each # category u"Klasse E alle (Technik, Betrieb, Vorschriften)": [34 * 3, 34 * 3 * .73, 60 * 3 * 60], } def cleanuptagtext(text): r = re.sub(r'\n', " ", text) r = re.sub(r'[\t]{1,}', " ", r) r = re.sub(r'\'', "''", r) return r def createExamsettings(): global global_exam_settings_id_seq for k, v in examsettings.iteritems(): topicname = k nquestions, nrequired, nseconds = v ref_topic_id = 0 for topicid, order, name, primary in topics: if name == topicname: ref_topic_id = topicid break assert ref_topic_id != 0 global_exam_settings_id_seq += 1 exam_settings.append((global_exam_settings_id_seq, ref_topic_id, nquestions, nrequired, nseconds)) def createMixtopics(): global global_topic_id_seq global global_question_to_topic_id_seq global global_category_to_topic_id_seq for k, v in mixtopics.iteritems(): mixtopicname = k global_topic_id_seq += 1 mixtopicid = global_topic_id_seq mixtopicorder = global_topic_id_seq mixtopicprimary = 0 for submix in v: for topicid, order, name, primary in topics: if name == submix: for _, qid, tid in question_to_topic: if tid == topicid: global_question_to_topic_id_seq +=1 question_to_topic.append((global_question_to_topic_id_seq, qid, mixtopicid)) for _, cid, tid in category_to_topic: if tid == topicid: global_category_to_topic_id_seq += 1 category_to_topic.append((global_category_to_topic_id_seq, cid, mixtopicid)) topics.append((mixtopicid, mixtopicorder, mixtopicname, mixtopicprimary)) def parseChapter(chapter, parent, topic_id, primary=0): global global_category_id_seq global global_category_to_topic_id_seq global global_question_to_category_id_seq global global_question_to_lichtblick_id_seq global global_question_id_seq global global_answer_id_seq global global_question_to_topic_id_seq name = chapter.get('name') reference = chapter.get('id') categoryid = global_category_id_seq categories.append((categoryid, name, reference, primary, parent)) global_category_to_topic_id_seq += 1 category_to_topic.append((global_category_to_topic_id_seq, categoryid, topic_id)) # if this chapter has only questions for question in chapter.findall('{http://funktrainer.hosenhasser.de}question'): global_question_id_seq += 1 reference = question.get('id') next_time = 1 correct = 0 wrong = 0 level = 0 qhelp = "" lichtblick = int(question.get('lichtblick')) qtext = 
cleanuptagtext(question.find('{http://funktrainer.hosenhasser.de}textquestion').text) for aidx, answer in enumerate(question.findall('{http://funktrainer.hosenhasser.de}textanswer')): global_answer_id_seq += 1 answers.append((global_answer_id_seq, global_question_id_seq, aidx, cleanuptagtext(answer.text), " ")) questions.append((global_question_id_seq, reference, qtext, level, next_time, wrong, correct, qhelp)) global_question_to_category_id_seq += 1 question_to_category.append((global_question_to_category_id_seq, global_question_id_seq, categoryid)) global_question_to_lichtblick_id_seq += 1 question_to_lichtblick.append((global_question_to_lichtblick_id_seq, global_question_id_seq, lichtblick)) global_question_to_topic_id_seq += 1 question_to_topic.append((global_question_to_topic_id_seq, global_question_id_seq, topic_id)) # if this chapter has only subchapters for subchapter in chapter.findall('{http://funktrainer.hosenhasser.de}chapter'): global_category_id_seq += 1 parseChapter(subchapter, categoryid, topic_id) def main(funkfragen, output_dir): global global_category_id_seq global global_topic_id_seq global global_category_to_topic_id_seq tree = ET.parse(funkfragen) root = tree.getroot() for chapter in root.findall('{http://funktrainer.hosenhasser.de}chapter'): global_category_id_seq += 1 global_topic_id_seq += 1 topics.append((global_topic_id_seq, global_topic_id_seq, chapter.get('name'), 1)) global_category_to_topic_id_seq += 1 category_to_topic.append((global_category_to_topic_id_seq, global_category_id_seq, global_topic_id_seq)) parseChapter(chapter, 0, global_topic_id_seq, 1) createMixtopics() createExamsettings() with open(os.path.join(output_dir, 'scheme_and_data.sql'), 'w') as o: print >>o, "BEGIN;" print >>o, "DROP TABLE IF EXISTS topic;" print >>o, "DROP TABLE IF EXISTS category;" print >>o, "DROP TABLE IF EXISTS question;" print >>o, "DROP TABLE IF EXISTS answer;" print >>o, "DROP TABLE IF EXISTS question_to_category;" print >>o, "DROP TABLE IF EXISTS question_to_topic;" print >>o, "DROP TABLE IF EXISTS category_to_topic;" print >>o, "DROP TABLE IF EXISTS question_to_lichtblick;" print >>o, "DROP TABLE IF EXISTS topic_exam_settings;" # print >>o, "DROP TABLE IF EXISTS exam;" print >>o, "" print >>o, "CREATE TABLE topic (_id INT NOT NULL PRIMARY KEY, order_index INT NOT NULL UNIQUE, name TEXT NOT NULL, isprimary INT);" print >>o, "CREATE TABLE category (_id INT NOT NULL PRIMARY KEY, name TEXT NOT NULL, reference TEXT NOT NULL, isprimary INT, parent INT REFERENCES category(_id));" print >>o, "CREATE TABLE question (_id INT NOT NULL PRIMARY KEY, reference TEXT, question TEXT NOT NULL, level INT NOT NULL, next_time INT NOT NULL, wrong INT, correct INT, help TEXT);" print >>o, "CREATE TABLE answer (_id INT NOT NULL PRIMARY KEY, question_id INT NOT NULL REFERENCES question(_id), order_index INT, answer TEXT NOT NULL, help TEXT);" print >>o, "CREATE TABLE question_to_category (_id INT NOT NULL PRIMARY KEY, question_id INT NOT NULL REFERENCES question(_id), category_id INT NOT NULL REFERENCES category(_id));" print >>o, "CREATE TABLE question_to_topic (_id INT NOT NULL PRIMARY KEY, question_id INT NOT NULL REFERENCES question(_id), topic_id INT NOT NULL REFERENCES topic(_id));" print >>o, "CREATE TABLE category_to_topic (_id INT NOT NULL PRIMARY KEY, category_id INT NOT NULL REFERENCES category(_id), topic_id INT NOT NULL REFERENCES topic(_id));" print >>o, "CREATE TABLE question_to_lichtblick (_id INT NOT NULL PRIMARY KEY, question_id INT NOT NULL REFERENCES question(_id), 
lichtblick INT);" print >>o, "CREATE TABLE topic_exam_settings (_id INT NOT NULL PRIMARY KEY, topic_id INT NOT NULL REFERENCES topic(_id), number_questions INT, number_questions_pass INT, seconds_available INT);" # print >>o, "CREATE TABLE exam (_id INT NOT NULL PRIMARY KEY, topic_id INT NOT NULL REFERENCES topic(_id), time_started INT NOT NULL, time_left INT, exam_json STRING);" print >>o, "" # TODO: fix unicode for t in topics: bla = u"INSERT INTO topic VALUES (%i, %i, '%s', %i);" % t print >>o, bla.encode('utf-8') for c in categories: #print >>o, u"INSERT INTO category VALUES (%i, %s, %i, %i, %s);" % c #print >>o, u"INSERT INTO category VALUES ({}, {}, {}, {}, {});".format(*c) bla = u"INSERT INTO category VALUES ({}, '{}', '{}', {}, {});".format(*c) print >>o, bla.encode('utf-8') for q in questions: bla = u"INSERT INTO question VALUES (%i, '%s', '%s', %i, %i, %i, %i, '%s');" % q print >>o, bla.encode('utf-8') for a in answers: bla = u"INSERT INTO answer VALUES (%i, %i, %i, '%s', '%s');" % a print >>o, bla.encode('utf-8') for qtc in question_to_category: bla = u"INSERT INTO question_to_category VALUES (%i, %i, %i);" % qtc print >>o, bla.encode('utf-8') for qtt in question_to_topic: bla = u"INSERT INTO question_to_topic VALUES (%i, %i, %i);" % qtt print >>o, bla.encode('utf-8') for ctt in category_to_topic: bla = u"INSERT INTO category_to_topic VALUES (%i, %i, %i);" % ctt print >>o, bla.encode('utf-8') for qtl in question_to_lichtblick: bla = u"INSERT INTO question_to_lichtblick VALUES (%i, %i, %i);" % qtl print >>o, bla.encode('utf-8') for exs in exam_settings: bla = u"INSERT INTO topic_exam_settings VALUES (%i, %i, %i, %i, %i);" % exs print >>o, bla.encode('utf-8') print >>o, "COMMIT;" if __name__ == '__main__': if len(sys.argv) < 3: print >>sys.stderr, "usage: %s <funkfragen_lichtblick.xml> output_directory/" sys.exit(1) funkfragen = sys.argv[1] output_dir = sys.argv[2] main(funkfragen, output_dir) ```
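`parse_xml_to_sql_dump.py` emits a plain SQL text dump rather than a ready-made database file. One possible way to load and sanity-check that dump with Python's built-in `sqlite3` module is sketched below; the `output/scheme_and_data.sql` path is only an assumed example location (the script writes to whatever output directory is passed on the command line), and the query is an illustrative per-topic question count based on the schema created above.

```python
import sqlite3

# Load the dump produced by parse_xml_to_sql_dump.py into an in-memory DB.
with open("output/scheme_and_data.sql", encoding="utf-8") as f:
    sql_dump = f.read()

conn = sqlite3.connect(":memory:")
conn.executescript(sql_dump)

# Sanity check: number of questions assigned to each topic.
rows = conn.execute(
    "SELECT t.name, COUNT(*) "
    "FROM question_to_topic qt JOIN topic t ON t._id = qt.topic_id "
    "GROUP BY t.name"
).fetchall()
for name, count in rows:
    print(name, count)
conn.close()
```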
{ "source": "jensheit/lazy_dataset", "score": 3 }
#### File: lazy_dataset/lazy_dataset/database.py ```python import json import weakref from pathlib import Path import typing import lazy_dataset class Database: """Base class for databases. This class is abstract! """ def __init__(self): self._dataset_weak_ref_dict = weakref.WeakValueDictionary() @property def data(self): """ Returns a nested dictionary with the following structure: { 'datasets': { <dataset_name_1>: { <example_id>: {...}, ... }, ... }, 'alias': { <dataset_name>: [<dataset_name_1>, <dataset_name_2>], ... } } Under the key `datasets` are the datasets, where each dataset is a dictionary that mapps from an example_id to the example. Is is assumed that the example is a dictionary and does not contain the keys `dataset` and `example_id`. These keys are added in the `get_dataset` method. Under the key `alias` are datasets listed that group multiple datasets to a new dataset. Beside the keys `datasets` and `alias` may exist further keys, they are ignored in the base class, but may be used in inherited database classes. In case of an speech audio mixture an example may look as follows: audio_path: speech_source: <path to speech of speaker 0> <path to speech of speaker 1> observation: blue_array: (a list, since there are no missing channels) <path to observation of blue_array and channel 0> <path to observation of blue_array and channel 0> ... red_array: (special case for missing channels) c0: <path to observation of red_array and channel 0> c99: <path to observation of red_array and channel 99> ... speech_image: ... speaker_id: <speaker_id for speaker 0> ... gender: <m/f> ... The example does not contain binary data (e.g. audio signals). Instead it contains the paths to them. In this may big databases fit into the memory. In a later map function the binary data can be loaded. We recommend to use absolute paths to the data, so that working with the example is as easy as possible. """ raise NotImplementedError( f'Override this property in {self.__class__.__name__}!') @property def dataset_names(self): """ A tuple of all available dataset names, i.e. the keys of `data['datasets']` and `data['alias']`. """ return tuple( self.data['datasets'].keys() ) + tuple( self.data.get('alias', {}).keys() ) def get_examples(self, dataset_name): """ Get examples dict for a certain dataset name. example_id and dataset name are added to each example. Do not make inplace manipulations of the returned dictionary!!! Args: dataset_name: the name of the requested dataset Returns: a dictionary with examples from the requested dataset """ try: if dataset_name in self.data.get('alias', []): dataset_names = self.data['alias'][dataset_name] examples = {} for name in dataset_names: examples_new = self.data['datasets'][name] intersection = set.intersection( set(examples.keys()), set(examples_new.keys()), ) assert len(intersection) == 0, intersection examples = {**examples, **examples_new} else: examples = {**self.data['datasets'][dataset_name]} except KeyError: import difflib similar = difflib.get_close_matches( dataset_name, self.dataset_names, n=5, cutoff=0) raise KeyError(dataset_name, f'close_matches: {similar}', self) if len(examples) == 0: # When somebody need empty datasets, add an option to this # function to allow empty datasets. raise RuntimeError( f'The requested dataset {dataset_name!r} is empty. 
' ) for example_id in examples.keys(): examples[example_id] = { **examples[example_id], 'example_id': example_id, 'dataset': dataset_name, } return examples def get_dataset(self, name=None): """Return a single lazy dataset over specified datasets. Adds the example_id and dataset_name to each example dict. This function should never be overwritten. Args: name: list or str specifying the datasets of interest. If None an exception msg is raised that shows all available names. When the requested dataset does not exist, the closest matches are displayed in the exception msg. Returns: A lazy dataset. """ return self._get_dataset(name) def _get_dataset(self, name=None): """ The private get_dataset function allows for databases to overwrite get_dataset and adding a map function without recursively calling the map function in case of multiple dataset names. """ if name is None: raise TypeError( f'Missing dataset_name, use e.g.: {self.dataset_names}' ) if isinstance(name, str): pass elif isinstance(name, typing.Iterable) and not isinstance(name, dict): datasets = [self._get_dataset(n) for n in name] return lazy_dataset.concatenate(*datasets) else: raise TypeError( 'Argument type {type(name)} of {name} is not allowed!' 'Expected are str, list or tuple.' ) # Resulting dataset is immutable anyway due to pickle in # `lazy_dataset.from_dict`. This code here avoids to store the # resulting dataset more than once in memory. Discuss with CBJ for # details. try: return self._dataset_weak_ref_dict[name] except KeyError: pass examples = self.get_examples(name) ds = lazy_dataset.from_dict(examples, name=name) self._dataset_weak_ref_dict[name] = ds return ds class DictDatabase(Database): def __init__(self, database_dict: dict): """ A simple database class intended to hold a given database_dict. Args: database_dict: A pickle serializeable database dictionary. """ self._data = database_dict super().__init__() @property def data(self): return self._data class JsonDatabase(Database): def __init__(self, json_path: [str, Path]): """ Args: json_path: path to database JSON """ self._json_path = json_path super().__init__() _data = None @property def data(self): if self._data is None: path = Path(self._json_path).expanduser() with path.open() as fd: self._data = json.load(fd) return self._data def __repr__(self): return f'{type(self).__name__}({self._json_path!r})' ``` #### File: lazy_dataset/tests/test_bucket.py ```python import lazy_dataset def test_bucket(): examples = [1, 10, 5, 7, 8, 2, 4, 3, 20, 1, 6, 9] examples = {str(j): i for j, i in enumerate(examples)} ds = lazy_dataset.new(examples) dynamic_batched_buckets = list(ds.batch_dynamic_time_series_bucket( batch_size=2, len_key=lambda x: x, max_padding_rate=0.5 )) assert dynamic_batched_buckets == [ [10, 5], [7, 8], [1, 2], [4, 3], [6, 9], [20], [1] ] ```
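The `data` layout expected by `Database` (a `datasets` mapping plus optional `alias` groups) is easiest to see with a toy in-memory example. The sketch below builds one with `DictDatabase`; the import path is inferred from the file header above (`lazy_dataset/lazy_dataset/database.py`) and the example ids and values are invented.

```python
from lazy_dataset.database import DictDatabase

toy = {
    "datasets": {
        "train": {"ex1": {"value": 1}, "ex2": {"value": 2}},
        "dev": {"ex3": {"value": 3}},
    },
    # "train_dev" groups both datasets under a single name.
    "alias": {"train_dev": ["train", "dev"]},
}

db = DictDatabase(toy)
print(db.dataset_names)           # ('train', 'dev', 'train_dev')

ds = db.get_dataset("train_dev")  # one lazy dataset over both member datasets
for example in ds:
    # 'example_id' and 'dataset' are injected by get_examples().
    print(example["example_id"], example["dataset"], example["value"])
```

Requesting the alias returns a single lazy dataset over both member datasets, and every example comes back with the injected `example_id` and `dataset` keys.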
{ "source": "jensheit/paderbox", "score": 4 }
#### File: paderbox/utils/misc.py ```python import os import sys from collections.abc import Mapping from typing import Iterable, Hashable def interleave(*lists): """ Interleave multiple lists. Input does not need to be of equal length. Based on http://stackoverflow.com/a/29566946/911441 >>> a = [1, 2, 3, 4, 5] >>> b = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] >>> list(interleave(a, b)) [1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e', 'f', 'g'] >>> list(interleave(b, a)) ['a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 'g'] Args: lists: An arbitrary number of lists Returns: Interleaved lists """ iterators = [iter(l) for l in lists] while True: for iter_idx in range(len(iterators)): try: if iterators[iter_idx] is not None: yield next(iterators[iter_idx]) except StopIteration: iterators[iter_idx] = None if all(i is None for i in iterators): return class PrintSuppressor: """Context manager to suppress print output. Source: https://stackoverflow.com/a/45669280 """ # pylint: disable=attribute-defined-outside-init def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout def all_equal(x: Iterable[Hashable]) -> bool: """ Checks if all elements in `x` are equal. Returns `False` if `x` is empty. Defined to improve readability. Examples: >>> all_equal((1, 1, 1, 1)) True >>> all_equal((1, 1, 1, 2)) False """ if isinstance(x, Mapping): raise TypeError('all_equal does not support Mappings') return len(set(x)) == 1 def all_unique(x: Iterable[Hashable]) -> bool: """ Checks if all elements in `x` are unique. Returns `True` if `x` is empty. Defined to improve readability. Examples: >>> all_unique((1, 2, 3, 4)) True >>> all_unique((1, 2, 3, 1)) False """ if isinstance(x, Mapping): raise TypeError('all_unique does not support Mappings') return len(set(x)) == len(list(x)) def all_in(x: Iterable[Hashable], y: Iterable[Hashable]) -> bool: """ Check if all elements in `x` are in `y`. Returns `True` if `x` is empty. Equivalent to `set(x).issubset(y)`. Defined to improve readability. Examples: >>> all_in([1, 2, 2, 1, 2], [1, 2, 3]) True >>> all_in([1, 2, 2, 4, 2], [1, 2, 3]) False """ return set(x).issubset(y) def any_in(x: Iterable[Hashable], y: Iterable[Hashable]) -> bool: """ Check if any element in `x` is in `y`. Returns `False` if `x` is empty. Defined to improve readability. 
Examples: >>> any_in([1, 2, 2, 1, 2], [1, 2, 3]) True >>> any_in([1, 2, 2, 4, 2], [1, 2, 3]) True >>> any_in([1, 2, 2, 4, 2], [3, 5, 6]) False """ return bool(set(x).intersection(y)) ``` #### File: tests/array_tests/test_array_interval.py ```python import pytest from paderbox.array import interval from paderbox.array.interval.util import ( cy_non_intersection, cy_intersection, cy_parse_item, cy_str_to_intervals, cy_invert_intervals, ) def test_cy_invert_intervals(): assert cy_invert_intervals(((1, 2), (3, 4)), 5) == ((0, 1), (2, 3), (4, 5)) assert cy_invert_intervals(((0, 3), (4, 5)), 5) == ((3, 4),) assert cy_invert_intervals(((0, 3), (4, 5)), 6) == ((3, 4), (5, 6)) assert cy_invert_intervals(((1, 3), (4, 5)), 5) == ((0, 1), (3, 4)) assert cy_invert_intervals((), 10) == ((0, 10),) def test_cy_non_intersection(): assert cy_non_intersection((0, 3), ((1, 2),)) == () assert cy_non_intersection((1, 4), ((0, 2), (3, 5))) == ((0, 1), (4, 5)) assert cy_non_intersection((1, 2), ((0, 3),)) == ((0, 1), (2, 3)) def test_cy_intersection(): assert cy_intersection((0, 3), ((1, 2),)) == ((1, 2),) assert cy_intersection((1, 4), ((0, 2), (3, 5))) == ((1, 2), (3, 4)) assert cy_intersection((1, 2), ((0, 3),)) == ((1, 2),) assert cy_intersection((4, 5), ((0, 3),)) == () def test_shape(): ai = interval.zeros(1) assert isinstance(ai.shape, tuple) assert isinstance(ai.shape[0], int) interval.zeros((1,)) assert isinstance(ai.shape, tuple) assert isinstance(ai.shape[0], int) interval.zeros([1, ]) assert isinstance(ai.shape, tuple) assert isinstance(ai.shape[0], int) with pytest.raises(TypeError): interval.zeros('a') with pytest.raises(TypeError): interval.zeros({'num_samples': 42}) with pytest.raises(TypeError): interval.zeros(('asdf', )) with pytest.raises(ValueError): interval.zeros((1, 2)) ```
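The helpers in `paderbox/utils/misc.py` above are straightforward to use; a minimal sketch (a hypothetical script, not part of the package):

```python
from paderbox.utils.misc import PrintSuppressor, interleave, all_unique, any_in

with PrintSuppressor():
    print('this line is redirected to os.devnull')
print('printing works again outside the context manager')

# `interleave` is a generator, so materialize it with `list`.
assert list(interleave([1, 3, 5], ['a', 'b'])) == [1, 'a', 3, 'b', 5]

assert all_unique(['x', 'y', 'z'])
assert not any_in([], [1, 2, 3])  # empty `x` gives False
```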
{ "source": "jensheit/padertorch", "score": 2 }
#### File: contrib/cb/summary.py ```python import collections import torch import einops import cached_property import padertorch as pt # loss: torch.Tensor = None, # losses: dict = None, # scalars: dict = None, # histograms: dict = None, # audios: dict = None, # images: dict = None, class ReviewSummary(collections.abc.Mapping): """ >>> review_summary = ReviewSummary() >>> review_summary ReviewSummary(prefix='', _data={}) """ _keys = set(pt.train.hooks.SummaryHook.empty_summary_dict().keys()) | { 'loss', 'losses' } def __init__(self, prefix='', _data=None, sampling_rate=None, visible_dB=60): if _data is None: _data = {} self.data = _data self.prefix = prefix self.sampling_rate = sampling_rate self.visible_dB = visible_dB def add_to_loss(self, value): assert torch.isfinite(value), value if 'loss' in self.data: self.data['loss'] = self.data['loss'] + value else: self.data['loss'] = value def add_scalar(self, name, *value): # Save the mean of all added values value = pt.data.batch.example_to_numpy(value, detach=True) self.data.setdefault( 'scalars', {} ).setdefault( f'{self.prefix}{name}', [] ).extend(value) def add_audio(self, name, signal, sampling_rate=None, batch_first=None, normalize=True): if sampling_rate is None: sampling_rate = self.sampling_rate assert sampling_rate is not None, sampling_rate audio = pt.summary.audio( signal=signal, sampling_rate=sampling_rate, batch_first=batch_first, normalize=normalize ) self.data.setdefault( 'audios', {} )[f'{self.prefix}{name}'] = audio def add_text(self, name, text): assert isinstance(text, str), (type(text), text) self.data.setdefault( 'texts', {} )[f'{self.prefix}{name}'] = text def _rearrange(self, array, rearrange): if rearrange is not None: return einops.rearrange(array, rearrange) else: return array def add_image(self, name, image): # Save the last added value image = pt.utils.to_numpy(image, detach=True) if image.ndim != 3: raise AssertionError( 'Did you forget to call "pt.summary.*_to_image"?\n' f'Expected ndim == 3, got shape {image.shape}.' 
) self.data.setdefault( 'images', {} )[f'{self.prefix}{name}'] = image def add_stft_image( self, name, signal, *, batch_first=None, color='viridis', rearrange=None): signal = self._rearrange(signal, rearrange) image = pt.summary.stft_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB) self.add_image(name, image) def add_spectrogram_image( self, name, signal, *, batch_first=None, color='viridis', rearrange=None): signal = self._rearrange(signal, rearrange) image = pt.summary.spectrogram_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB) self.add_image(name, image) def add_mask_image(self, name, mask, *, batch_first=None, color='viridis', rearrange=None): mask = self._rearrange(mask, rearrange) image = pt.summary.mask_to_image(mask, batch_first=batch_first, color=color) self.add_image(name, image) def add_histogram(self, name, values): value = pt.utils.to_numpy(values, detach=True) self.data.setdefault( 'histograms', {} ).setdefault( f'{self.prefix}{name}', [] ).append(value) def __contains__(self, item): return item in self.data def __getitem__(self, key): assert key in self._keys, (key, self._keys) return self.data[key] def __setitem__(self, key, value): assert key in self._keys, (key, self._keys) self.data[key] = value def get(self, item, default): if item in self: return self.data[item] else: return default def pop(self, *args, **kwargs): """pop(key[, default])""" return self.data.pop(*args, **kwargs) def setdefault(self, key, default): self.data.setdefault(key, default) def __iter__(self): return iter(self.data) def __len__(self): return len(self.data) def __repr__(self): return f'{self.__class__.__name__}(prefix={self.prefix!r}, _data={dict(self)!r})' def _repr_pretty_(self, p, cycle): """ >>> review_summary = ReviewSummary() >>> review_summary.add_to_loss(1) >>> review_summary.add_scalar('abc', 2) >>> review_summary ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}}) >>> from IPython.lib.pretty import pprint >>> pprint(review_summary) ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}}) >>> pprint(review_summary, max_width=79-18) ReviewSummary( prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}} ) >>> pprint(review_summary, max_width=79-40) ReviewSummary( prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}} ) """ if cycle: p.text(f'{self.__class__.__name__}(...)') else: txt = f'{self.__class__.__name__}(' with p.group(4, txt, ''): p.breakable(sep='') p.text('prefix=') p.pretty(self.prefix) p.text(',') p.breakable() txt = '_data=' with p.group(len(txt), txt, ''): p.pretty(dict(self)) p.breakable('') p.text(')') class _Plotter: def __init__(self, review: 'ReviewSummary'): self.review = review def image( self, key, origin='lower', **kwargs ): import numpy as np import matplotlib.pyplot as plt kwargs = { 'origin': origin, **kwargs, } if key not in self.review['images']: from paderbox.utils.mapping import DispatchError raise DispatchError(key, self.review['images'].keys()) X = np.einsum('chw->hwc', self.review['images'][key]) if origin == 'lower': X = X[::-1] else: assert origin == 'upper' # ToDo: Where is AxesImage defined? 
ax: 'plt.AxesImage' = plt.imshow( X, **kwargs, ) # ax.set_title(key) plt.title(key) plt.grid(False) return ax def images( self, columns=1, font_scale=1.0, line_width=3, figure_size=(8.0, 6.0), ): from paderbox.visualization import axes_context from paderbox.visualization.context_manager import _AxesHandler with axes_context( columns=columns, font_scale=font_scale, line_width=line_width, figure_size=figure_size, ) as axes: axes: _AxesHandler for k in self.review['images']: axes.new.grid(False) # set gca self.image(k) @cached_property.cached_property def plot(self): return self._Plotter(self) def play(self, key=None): if key is None: for k in self['audios'].keys(): self.play(k) elif key in self['audios']: from paderbox.io.play import play data, sample_rate = self['audios'][key] play(data, sample_rate=sample_rate, name=key) else: from paderbox.utils.mapping import DispatchError raise DispatchError(key, self['audios'].keys()) ``` #### File: contrib/cb/transform.py ```python import typing import math from einops import rearrange import numpy as np import torch.nn.functional import torch_complex from paderbox.array import segment_axis from paderbox.transform.module_stft import _biorthogonal_window_fastest, _get_window from padertorch.contrib.cb.array import overlap_add def stft( time_signal, size: int = 1024, shift: int = 256, *, # axis=-1, # I never use this and it complicated the code window: [str, typing.Callable] = 'blackman', window_length: int = None, fading: typing.Optional[typing.Union[bool, str]] = 'full', pad: bool = True, symmetric_window: bool = False, ): """ >>> import numpy as np >>> import random >>> from paderbox.transform.module_stft import stft as np_stft, istft as np_istft >>> import torch.fft >>> kwargs = dict( ... size=np.random.randint(100, 200), ... shift=np.random.randint(40, 100), ... window=random.choice(['blackman', 'hann', 'hamming']), ... fading=random.choice(['full', 'half', False]), ... ) >>> num_samples = np.random.randint(200, 500) >>> a = np.random.rand(num_samples) >>> A_np = np_stft(a, **kwargs) >>> A_pt = stft(torch.tensor(a), **kwargs) >>> np.testing.assert_allclose( ... A_np, A_pt.numpy(), err_msg=str(kwargs), atol=1e-10) """ assert isinstance(time_signal, torch.Tensor) if window_length is None: window_length = size # Pad with zeros to have enough samples for the window function to fade. 
assert fading in [None, True, False, 'full', 'half'], (fading, type(fading)) if fading not in [False, None]: if fading == 'half': pad_width = [ (window_length - shift) // 2, math.ceil((window_length - shift) / 2), ] else: pad_width = [ window_length - shift, window_length - shift, ] time_signal = torch.nn.functional.pad(time_signal, pad_width, mode='constant') window = _get_window( window=window, symmetric_window=symmetric_window, window_length=window_length, ) window = torch.from_numpy(window).to(time_signal.device) time_signal_seg = segment_axis( time_signal, window_length, shift=shift, axis=-1, end='pad' if pad else 'cut' ) # if hasattr(torch, 'rfft'): if callable(torch.fft): if window_length != size: raise NotImplementedError( f'Old Torch does not support window_length != size\n' f'window_length = {window_length} != {size} = size' ) out = torch.rfft( time_signal_seg * window, 1, # size, ) assert out.shape[-1] == 2, out.shape return torch_complex.ComplexTensor(out[..., 0], out[..., 1]) else: return torch.fft.rfft(time_signal_seg * window, size) def _complex_to_real(tensor): return torch.stack([tensor.real, tensor.imag], dim=-1) def istft( stft_signal, size: int=1024, shift: int=256, *, window: [str, typing.Callable]='blackman', fading: typing.Optional[typing.Union[bool, str]] = 'full', window_length: int=None, symmetric_window: bool=False, num_samples: int=None, pad: bool=True, biorthogonal_window=None, ): """ Why not torch.istft? - It failed for me sometimes with a strange error msg, when I used a blackman window. - It is slower than this function (Approx 2 times slower). - This can easily invert our numpy stft (i.e. the parameters match). - You can read the source code. - Take as input the correct dtype (i.e. complex and not float). - Note: In the long term torch_complex.ComplexTensor will be replaced with the native type. >>> import numpy as np >>> import random >>> import torch.fft >>> from paderbox.transform.module_stft import stft as np_stft, istft as np_istft >>> kwargs = dict( ... size=np.random.randint(100, 200), ... shift=np.random.randint(40, 70), ... window=random.choice(['blackman', 'hann', 'hamming']), ... fading=random.choice(['full', 'half', False]), ... ) >>> num_samples = np.random.randint(200, 500) >>> a = np.random.rand(num_samples) >>> A_np = np_stft(a, **kwargs) >>> A_pt = stft(torch.tensor(a), **kwargs) >>> np.testing.assert_allclose( ... A_np, A_pt.numpy(), err_msg=str(kwargs), atol=1e-10) >>> a_np = np_istft(A_np, **kwargs) >>> a_pt = istft(A_pt, **kwargs) >>> np.testing.assert_allclose( ... a_np, a_pt.numpy(), err_msg=str(kwargs), atol=1e-10) >>> kwargs['window_length'] = np.random.randint(70, kwargs['size'] - 1) >>> num_samples = np.random.randint(200, 500) >>> a = np.random.rand(num_samples) >>> A_np = np_stft(a, **kwargs) >>> A_pt = stft(torch.tensor(a), **kwargs) >>> np.testing.assert_allclose( ... A_np, A_pt.numpy(), err_msg=str(kwargs), atol=1e-10) >>> a_np = np_istft(A_np, **kwargs) >>> a_pt = istft(A_pt, **kwargs) >>> np.testing.assert_allclose( ... a_np, a_pt.numpy(), err_msg=str(kwargs), atol=1e-10) >>> kwargs = dict( ... size=4, ... shift=2, ... window='hann', ... fading='full', ... ) >>> num_samples = 8 >>> a = np.arange(num_samples).astype(np.float32) >>> np_stft(a, **kwargs) array([[ 0.5+0.j , 0. +0.5j, -0.5+0.j ], [ 4. +0.j , -2. +1.j , 0. +0.j ], [ 8. +0.j , -4. +1.j , 0. +0.j ], [12. +0.j , -6. +1.j , 0. +0.j ], [ 3.5+0.j , 0. 
-3.5j, -3.5+0.j ]]) >>> a_pt_init = torch.tensor(a, requires_grad=True) >>> A_pt = stft(a_pt_init, **kwargs) >>> a_pt = istft(A_pt, **kwargs) >>> A_pt.grad >>> rnd_num = torch.randn(a_pt.shape) >>> torch.sum(a_pt * rnd_num).backward() >>> A_pt.grad >>> print(rnd_num, '\\n', a_pt_init.grad) """ if window_length is None: window_length = size if biorthogonal_window is None: window = torch.from_numpy( _biorthogonal_window_fastest( _get_window( window, symmetric_window, window_length ), shift)).to(device=stft_signal.device) else: window = biorthogonal_window # if hasattr(torch, 'irfft'): if callable(torch.fft): assert isinstance(stft_signal, torch_complex.ComplexTensor), type(stft_signal) stft_signal = rearrange(_complex_to_real( stft_signal ), '... t f realimag -> ... t f realimag', realimag=2) stft_signal = torch.irfft(stft_signal, 1, signal_sizes=(size,)) else: assert isinstance(stft_signal, torch.Tensor) and stft_signal.is_complex(), (type(stft_signal), stft_signal.dtype) stft_signal = torch.fft.irfft(stft_signal, size) if window_length != size: stft_signal = stft_signal[..., :window_length] stft_signal = stft_signal * window stft_signal = overlap_add( stft_signal, shift ) # Remove the padding values from fading in the stft assert fading in [None, True, False, 'full', 'half'], fading if fading not in [None, False]: pad_width = (window_length - shift) if fading == 'half': pad_width /= 2 stft_signal = stft_signal[ ..., int(pad_width):stft_signal.shape[-1] - math.ceil( pad_width)] if num_samples is not None: if pad: assert stft_signal.shape[-1] >= num_samples, (stft_signal.shape, num_samples) assert stft_signal.shape[-1] < num_samples + shift, (stft_signal.shape, num_samples) stft_signal = stft_signal[..., :num_samples] else: raise ValueError( pad, 'When padding is False in the stft, the signal is cutted.' 
'This operation can not be inverted.') return stft_signal ``` #### File: source_separation/pit/evaluate.py ```python import os from collections import defaultdict from pathlib import Path import einops import sacred.commands from sacred import Experiment from sacred.observers import FileStorageObserver from sacred.utils import InvalidConfigError, MissingConfigError import torch import dlp_mpi import paderbox as pb import padertorch as pt import pb_bss from paderbox.transform import istft from lazy_dataset.database import JsonDatabase from padertorch.contrib.examples.source_separation.pit.data import \ prepare_iterable from padertorch.contrib.examples.source_separation.pit.templates import \ MAKEFILE_TEMPLATE_EVAL as MAKEFILE_TEMPLATE experiment_name = "pit" ex = Experiment(experiment_name) @ex.config def config(): debug = False database_json = None if database_json is None and 'NT_DATABASE_JSONS_DIR' in os.environ: database_json = Path( os.environ.get('NT_DATABASE_JSONS_DIR')) / 'wsj0_2mix_8k.json' assert len(database_json) > 0, ( 'Set path to database Json on the command line or set environment ' 'variable "NT_DATABASE_JSONS_DIR"' ) model_path = '' checkpoint_name = 'ckpt_best_loss.pth' experiment_dir = None if experiment_dir is None: experiment_dir = pt.io.get_new_subdir( Path(model_path) / 'evaluation', consider_mpi=True) batch_size = 1 datasets = ["mix_2_spk_min_cv", "mix_2_spk_min_tt"] locals() # Fix highlighting ex.observers.append(FileStorageObserver( Path(Path(experiment_dir) / 'sacred') )) if database_json is None: raise MissingConfigError( 'You have to set the path to the database JSON!', 'database_json') if not Path(database_json).exists(): raise InvalidConfigError('The database JSON does not exist!', 'database_json') @ex.capture def get_model(_run, model_path, checkpoint_name): model_path = Path(model_path) model = pt.Module.from_storage_dir( model_path, checkpoint_name=checkpoint_name, consider_mpi=True # Loads the weights only on master ) checkpoint_path = model_path / 'checkpoints' / checkpoint_name _run.info['checkpoint_path'] = str(checkpoint_path.expanduser().resolve()) return model @ex.command def init(_config, _run): """Creates a storage dir, writes Makefile. Does not start any evaluation.""" experiment_dir = Path(_config['experiment_dir']) config_path = Path(experiment_dir) / "config.json" pb.io.dump_json(_config, config_path) makefile_path = Path(experiment_dir) / "Makefile" makefile_path.write_text(MAKEFILE_TEMPLATE.format( main_python_path=pt.configurable.resolve_main_python_path(), experiment_dir=experiment_dir, experiment_name=experiment_name )) sacred.commands.print_config(_run) print() print('Initialized storage dir. 
Now run these commands:') print(f"cd {experiment_dir}") print(f"make evaluate") print() print('or') print() print('make ccsalloc') @ex.main def main(_run, batch_size, datasets, debug, experiment_dir, database_json): experiment_dir = Path(experiment_dir) if dlp_mpi.IS_MASTER: sacred.commands.print_config(_run) model = get_model() db = JsonDatabase(json_path=database_json) model.eval() with torch.no_grad(): summary = defaultdict(dict) for dataset in datasets: iterable = prepare_iterable( db, dataset, batch_size, return_keys=None, prefetch=False, ) for batch in dlp_mpi.split_managed(iterable, is_indexable=False, progress_bar=True, allow_single_worker=debug ): entry = dict() model_output = model(pt.data.example_to_device(batch)) example_id = batch['example_id'][0] s = batch['s'][0] Y = batch['Y'][0] mask = model_output[0].numpy() Z = mask * Y[:, None, :] z = istft( einops.rearrange(Z, "t k f -> k t f"), size=512, shift=128 ) s = s[:, :z.shape[1]] z = z[:, :s.shape[1]] entry['metrics'] \ = pb_bss.evaluation.OutputMetrics(speech_prediction=z, speech_source=s).as_dict() summary[dataset][example_id] = entry summary_list = dlp_mpi.gather(summary, root=dlp_mpi.MASTER) if dlp_mpi.IS_MASTER: print(f'len(summary_list): {len(summary_list)}') for partial_summary in summary_list: for dataset, values in partial_summary.items(): summary[dataset].update(values) for dataset, values in summary.items(): print(f'{dataset}: {len(values)}') result_json_path = experiment_dir / 'result.json' print(f"Exporting result: {result_json_path}") pb.io.dump_json(summary, result_json_path) if __name__ == '__main__': with pb.utils.debug_utils.debug_on(Exception): ex.run_commandline() ```
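The `ReviewSummary` container from `summary.py` above is typically filled inside a model's review step. The sketch below is only illustrative: the example keys, tensor shapes and the exact behaviour of the `pt.summary` helpers are assumptions, not something shown in the code above:

```python
import torch
from padertorch.contrib.cb.summary import ReviewSummary

def review(example, prediction):
    """Hypothetical review step that collects summaries for TensorBoard."""
    summary = ReviewSummary(sampling_rate=8000)  # sampling_rate is reused by add_audio

    loss = torch.nn.functional.mse_loss(prediction, example['target'])
    summary.add_to_loss(loss)        # accumulated into summary['loss']
    summary.add_scalar('mse', loss)  # logged as a scalar

    # Assumes prediction has shape (batch, samples) and example['stft'] is a
    # (batch, frames, bins) STFT; both are made-up names for this sketch.
    summary.add_audio('prediction', prediction[0])
    summary.add_stft_image('stft', example['stft'][0])
    return summary
```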
{ "source": "jenshnielsen/ipycytoscape", "score": 2 }
#### File: ipycytoscape/ipycytoscape/cytoscape.py ```python import copy import json from os import path from ipywidgets import CallbackDispatcher, DOMWidget, Widget, widget_serialization from spectate import mvc from traitlets import ( Bool, CaselessStrEnum, CFloat, Dict, Instance, Integer, List, TraitError, TraitType, Unicode, Union, ) from ._frontend import module_name, module_version try: import networkx as nx except ModuleNotFoundError: nx = None try: import pandas as pd except ModuleNotFoundError: pd = None try: import py2neo except ModuleNotFoundError: py2neo = None """TODO: Remove this after this is somewhat done""" import logging logger = logging.getLogger(__name__) __all__ = [ "MONITORED_USER_TYPES", "MONITORED_USER_INTERACTIONS", "Node", "Edge", "Graph", "CytoscapeWidget", ] MONITORED_USER_TYPES = ("node", "edge") MONITORED_USER_INTERACTIONS = ( "mousedown", # when the mouse button is pressed "mouseup", # when the mouse button is released "click", # after mousedown then mouseup "mouseover", # when the cursor is put on top of the target "mouseout", # when the cursor is moved off of the target "mousemove", # when the cursor is moved somewhere on top of the target "touchstart", # when one or more fingers starts to touch the screen "touchmove", # when one or more fingers are moved on the screen "touchend", # when one or more fingers are removed from the screen "tapstart", # normalised tap start event (either mousedown or touchstart) "vmousedown", # alias for 'tapstart' "tapdrag", # normalised move event (either touchmove or mousemove) "vmousemove", # alias for 'tapdrag' "tapdragover", # normalised over element event (either touchmove or mousemove/mouseover) # noqa "tapdragout", # normalised off of element event (either touchmove or mousemove/mouseout) # noqa "tapend", # normalised tap end event (either mouseup or touchend) "vmouseup", # alias for 'tapend' "tap", # normalised tap event (either click, or touchstart followed by touchend without touchmove) # noqa "vclick", # alias for 'tap' "taphold", # normalised tap hold event "cxttapstart", # normalised right-click mousedown or two-finger tapstart "cxttapend", # normalised right-click mouseup or two-finger tapend "cxttap", # normalised right-click or two-finger tap "cxtdrag", # normalised mousemove or two-finger drag after cxttapstart but before cxttapend # noqa "cxtdragover", # when going over a node via cxtdrag "cxtdragout", # when going off a node via cxtdrag "boxstart", # when starting box selection "boxend", # when ending box selection "boxselect", # triggered on elements when selected by box selection "box", # triggered on elements when inside the box on boxend ) class CytoInteractionDict(Dict): """A trait for specifying cytoscape.js user interactions.""" default_value = {} info_text = ( "specify a dictionary whose keys are cytoscape model types " "(pick from %s) and whose values are iterables of user interaction " "event types to get updates on (pick from %s)" ) % ( MONITORED_USER_TYPES, MONITORED_USER_INTERACTIONS, ) def validate(self, obj, value): retval = super().validate(obj, value) try: if not ( set(value.keys()).difference(MONITORED_USER_TYPES) or any( set(v).difference(MONITORED_USER_INTERACTIONS) for v in value.values() ) ): return retval except (AttributeError, TypeError): msg = ( "The %s trait of %s instance must %s, but a value of %s was " "specified." 
) % (self.name, type(obj).__name__, self.info_text, value) raise TraitError(msg) def _interaction_handlers_to_json(pydt, _widget): return {k: list(v) for k, v in pydt.items()} def _interaction_handlers_from_json(js, widget): raise ValueError( "Do not set ``_interaction_handlers`` from the client. " "Widget %s received JSON: %s" % (widget, js) ) interaction_serialization = { "to_json": _interaction_handlers_to_json, "from_json": _interaction_handlers_from_json, } class Mutable(TraitType): """A base class for mutable traits using Spectate""" _model_type = None _event_type = "change" def instance_init(self, obj): default = self._model_type() @mvc.view(default) def callback(default, events): change = dict( new=getattr(obj, self.name), name=self.name, type=self._event_type, ) obj.notify_change(change) setattr(obj, self.name, default) class MutableDict(Mutable): """A mutable dictionary trait""" _model_type = mvc.Dict class MutableList(Mutable): """A mutable list trait""" _model_type = mvc.List class Element(Widget): _model_name = Unicode("ElementModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_name = Unicode("ElementView").tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) removed = Bool().tag(sync=True) selected = Bool().tag(sync=True) selectable = Bool().tag(sync=True) classes = Unicode().tag(sync=True) data = MutableDict().tag(sync=True) pannable = Bool().tag(sync=True) _base_cyto_attrs = [ "removed", "selected", "selectable", "classes", "data", "pannable", ] class Edge(Element): """Edge Widget""" _model_name = Unicode("EdgeModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_name = Unicode("EdgeView").tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) pannable = Bool(True).tag(sync=True) # currently we don't sync anything for edges outside of the base Element _cyto_attrs = [] class Node(Element): """Node Widget""" _model_name = Unicode("NodeModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_name = Unicode("NodeView").tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) position = MutableDict().tag(sync=True) locked = Bool(False).tag(sync=True) grabbable = Bool(True).tag(sync=True) grabbed = Bool(False).tag(sync=True) pannable = Bool(False).tag(sync=True) _cyto_attrs = ["position", "locked", "grabbable"] def _set_attributes(instance, data): cyto_attrs = instance._cyto_attrs + instance._base_cyto_attrs for k, v in data.items(): if k in cyto_attrs: setattr(instance, k, v) else: instance.data[k] = v class Graph(Widget): """Graph Widget""" _model_name = Unicode("GraphModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) nodes = MutableList(Instance(Node)).tag(sync=True, **widget_serialization) edges = MutableList(Instance(Edge)).tag(sync=True, **widget_serialization) # dictionary for syncing graph structure _adj = MutableDict().tag(sync=True) def add_node(self, node): """ Appends node to the end of the 
list. Equivalent to Python's append method. Parameters ---------- node : ipycytoscape.Node """ self.add_nodes([node]) def add_nodes(self, nodes): """ Appends nodes to the end of the list. Equivalent to Python's extend method. Parameters ---------- nodes : list of ipycytoscape.Node """ node_list = list() for node in nodes: if node.data["id"] not in self._adj: self._adj[node.data["id"]] = dict() node_list.append(node) self.nodes.extend(node_list) def remove_node(self, node): """ Removes node from the end of the list. Equivalent to Python's remove method. Parameters ---------- node : ipycytoscape.Node """ try: self.nodes.remove(node) for target in list(self._adj[node.data["id"]]): self.remove_edge_by_id(node.data["id"], target) for source in list(self._adj): for target in list(self._adj[source]): if target == node.data["id"]: self.remove_edge_by_id(source, node.data["id"]) del self._adj[node.data["id"]] except ValueError: raise ValueError(f'{node.data["id"]} is not present in the graph.') def remove_node_by_id(self, node_id): """ Removes node by the id specified. Parameters ---------- node_id : numeric or string """ node_list_id = -1 for i, node in enumerate(self.nodes): if node.data["id"] == node_id: node_list_id = i if node_list_id != -1: self.remove_node(self.nodes[node_list_id]) else: raise ValueError(f"{node_id} is not present in the graph.") def add_edge(self, edge, directed=False, multiple_edges=False): """ Appends edge from the end of the list. Equivalent to Python's append method. Parameters ---------- edge : cytoscape edge directed : bool multiple_edges : bool """ self.add_edges([edge], directed=directed, multiple_edges=multiple_edges) def add_edges(self, edges, directed=False, multiple_edges=False): """ Appends edges from the end of the list. If either the source or target Node of an Edge is not already in the graph it will be created and added to the Nodes list. 
Parameters ---------- edges : list of ipycytoscape.Edge directed : bool multiple_edges : boolean """ node_list = list() edge_list = list() for edge in edges: source, target = edge.data["source"], edge.data["target"] if directed and "directed" not in edge.classes: edge.classes += " directed " if multiple_edges and "multiple_edges" not in edge.classes: edge.classes += " multiple_edges " # If multiple edges are allowed, it's okay to add more # edges between the source and target if multiple_edges: new_edge = True # Check to see if the edge source -> target exists in the graph # If it does then don't add it again elif source in self._adj and target in self._adj[source]: new_edge = False # Check to see if the edge target-> source exists in an # undirected graph (don't add it again) elif not directed and target in self._adj and source in self._adj[target]: new_edge = False # If the edge doesn't exist already else: new_edge = True if new_edge: # if the edge is not present in the graph edge_list.append(edge) if source not in self._adj: node_instance = Node() # setting the id, according to current spec should be only int/str node_instance.data = {"id": source} node_list.append(node_instance) self._adj[source] = dict() if target not in self._adj: node_instance = Node() # setting the id, according to current spec should be only int/str node_instance.data = {"id": target} node_list.append(node_instance) self._adj[target] = dict() if multiple_edges and target in self._adj[source]: self._adj[source][target] += 1 else: self._adj[source][target] = 1 if not (directed or "directed" in edge.classes): if multiple_edges and source in self._adj[target]: self._adj[target][source] += 1 else: self._adj[target][source] = 1 else: # Don't add this edge, already present pass self.nodes.extend(node_list) self.edges.extend(edge_list) def remove_edge(self, edge): """ Removes edge from the end of the list. Equivalent to Python's remove method. Parameters ---------- edge : ipcytoscape.Edge """ source = edge.data["source"] target = edge.data["target"] try: self.edges.remove(edge) if self._adj[source][target] == 1: del self._adj[source][target] else: self._adj[source][target] -= 1 if "directed" not in edge.classes: if self._adj[target][source] == 1: del self._adj[target][source] else: self._adj[target][source] -= 1 except ValueError: raise ValueError( f"Edge from {edge.data['source']} to {edge.data['target']} " "is not present in the graph." ) def remove_edge_by_id(self, source_id, target_id): """ Removes edge by the id specified. Parameters ---------- source_id : numeric or string target_id : numeric or string """ contains_edge = False edges = copy.copy(self.edges) for edge in edges: if ( edge.data["source"] == source_id and edge.data["target"] == target_id ) or ( "directed" not in edge.classes and edge.data["source"] == target_id and edge.data["target"] == source_id ): self.remove_edge(edge) contains_edge = True if not contains_edge: raise ValueError( f"Edge between {source_id} and {target_id} is not present in the graph." ) def clear(self): """ Remove all the nodes and edges from the graph. Parameters ---------- self: cytoscape graph """ self.nodes.clear() self.edges.clear() self._adj.clear() def add_graph_from_networkx(self, g, directed=None, multiple_edges=None): """ Converts a NetworkX graph in to a Cytoscape graph. Parameters ---------- g : networkx graph receives a generic NetworkX graph. 
more info in https://networkx.github.io/documentation/ directed : bool If true all edges will be given directed as class if they do not already have it. Equivalent to adding 'directed' to the 'classes' attribute of edge.data for all edges """ # override type infering if directed is provided by the user if isinstance(g, nx.DiGraph) and directed is None: directed = True # override type infering if multiple_edges is provided by the user if isinstance(g, nx.MultiGraph) and multiple_edges is None: multiple_edges = True node_list = list() for node, data in g.nodes(data=True): if issubclass(type(node), Node): node_instance = node else: node_instance = Node() _set_attributes(node_instance, data) if "id" not in data: node_instance.data["id"] = str(node) node_list.append(node_instance) self.add_nodes(node_list) edge_list = list() for source, target, data in g.edges(data=True): edge_instance = Edge() if issubclass(type(source), Node): edge_instance.data["source"] = source.data["id"] else: edge_instance.data["source"] = str(source) if issubclass(type(target), Node): edge_instance.data["target"] = target.data["id"] else: edge_instance.data["target"] = str(target) _set_attributes(edge_instance, data) if directed and "directed" not in edge_instance.classes: edge_instance.classes += " directed " if multiple_edges and "multiple_edges" not in edge_instance.classes: edge_instance.classes += " multiple_edges " edge_list.append(edge_instance) self.add_edges(edge_list, directed, multiple_edges) def add_graph_from_json(self, json_file, directed=False, multiple_edges=False): """ Converts a JSON Cytoscape graph in to a ipycytoscape graph. (This method only allows the conversion from a JSON that's already formatted as a Cytoscape graph). Parameters ---------- json_file : dict or string If a dict is passed, it will be parsed as a JSON object, a file path (to the json graph file) can also be passed as a string, the file will be loaded it's content parsed as JSON an object. directed : bool If True all edges will be given 'directed' as a class if they do not already have it. """ if path.isfile(str(json_file)): with open(json_file) as f: json_file = json.load(f) node_list = list() for node in json_file["nodes"]: node_instance = Node() _set_attributes(node_instance, node) node_list.append(node_instance) self.add_nodes(node_list) edge_list = list() if "edges" in json_file: for edge in json_file["edges"]: edge_instance = Edge() _set_attributes(edge_instance, edge) if directed and "directed" not in edge_instance.classes: edge_instance.classes += " directed " if multiple_edges and "multiple_edges" not in edge_instance.classes: edge_instance.classes += " multiple_edges " edge_list.append(edge_instance) self.add_edges(edge_list, directed, multiple_edges) def add_graph_from_df( self, df, groupby_cols, attribute_list=[], edges=tuple(), directed=False, multiple_edges=False, ): """ Converts any Pandas DataFrame in to a Cytoscape graph. Parameters ---------- df : pandas dataframe groupby_cols : list of str List of dataframe columns attribute_list : list of str List of dataframe columns edges : tuple of edges The first item is the source edge and the second is the target edge directed : bool If True all edges will be given 'directed' as a class if they do not already have it. 
""" grouped = df.groupby(groupby_cols) group_nodes = {} for i, name in enumerate(grouped.groups): if not isinstance(name, tuple): name = (name,) group_nodes[name] = Node(data={"id": f"parent-{i}", "name": name}) graph_nodes = [] graph_edges = [] for index, row in df.iterrows(): parent = group_nodes[tuple(row[groupby_cols])] # Includes content to tips tip_content = "" for attribute in attribute_list: tip_content += f"{attribute}: {row[attribute]}\n" # Creates a list with all nodes adding them in the correct node parents graph_nodes.append( Node( data={"id": index, "parent": parent.data["id"], "name": tip_content} ) ) if not all(edges): # Creates a list with all nodes adding them in the correct node parents graph_nodes.append( Node( data={ "id": index, "parent": parent.data["id"], "name": tip_content, } ) ) if directed: classes = "directed " else: classes = "" graph_edges.append( Edge( data={ "id": index, "source": edges[0], "target": edges[1], "classes": classes, } ) ) # Adds group nodes and regular nodes to the graph object all_nodes = list(group_nodes.values()) + graph_nodes self.add_edges(graph_edges, directed, multiple_edges) self.add_nodes(all_nodes) def add_graph_from_neo4j(self, g): """ Converts a py2neo Neo4j subgraph into a Cytoscape graph. It also adds a 'tooltip' node attribute to the Cytoscape graph if it is not present in the Neo4j subgraph. This attribute can be set as a tooltip by set_tooltip_source('tooltip'). The tooltip then displays the node properties from the Neo4j nodes. Parameters ---------- g : py2neo Neo4j subgraph object See https://py2neo.org/v4/data.html#subgraph-objects """ def convert_types_to_string(node_attributes): """ Converts types not compatible with cytoscape to strings. Parameters ---------- node_attributes : dictionary of node attributes """ for k, v in node_attributes.items(): try: json.dumps(v) except TypeError: node_attributes[k] = str(v) return node_attributes def get_node_labels_by_priority(g): """ Returns a list of Neo4j node labels in priority order. If a Neo4j node has multiple labels, the most distinctive (least frequently occuring) label will appear first in this list. Example: five nodes have the labels (Person|Actor) and five nodes have the labels (Person|Director). In this case the Actor and Director labels have priority over the Person label. Parameters ---------- g : py2neo Neo4j subgraph object See https://py2neo.org/v4/data.html#subgraph-objects """ counts = dict() # This counts the number of instances that a node has a particular # label (a node can have multiple labels in Neo4j). # counts.get(label, 0) initializes the count with zero, and then # increments the value if more of the same labels are encountered. for node in g.nodes: for label in node.labels: counts[label] = counts.get(label, 0) + 1 return sorted(counts, key=counts.get) def create_tooltip(node_attributes, node_labels): """ Returns a string of node labels and node attributes to be used as a tooltip. 
Parameters ---------- node_attributes : dictionary of node attributes node_labels : list of node labels """ labels = ",".join(label for label in node_labels) attributes = "\n".join(k + ":" + str(v) for k, v in node_attributes.items()) return labels + "\n" + attributes # select labels to be displayed as node labels priority_labels = get_node_labels_by_priority(g) # convert Neo4j nodes to cytoscape nodes node_list = list() for node in g.nodes: node_attributes = dict(node) # convert Neo4j specific types to string node_attributes = convert_types_to_string(node_attributes) # create tooltip text string if "tooltip" not in node_attributes: tooltip_text = create_tooltip(node_attributes, node.labels) node_attributes["tooltip"] = tooltip_text # assign unique id to node node_attributes["id"] = node.identity # assign class label with the highest priority index = len(priority_labels) for label in node.labels: index = min(index, priority_labels.index(label)) node_attributes["label"] = priority_labels[index] # create node node_instance = Node() _set_attributes(node_instance, node_attributes) node_list.append(node_instance) self.add_nodes(node_list) # convert Neo4j relationships to cytoscape edges edge_list = list() for rel in g.relationships: edge_instance = Edge() # create dictionaries of relationship rel_attributes = dict(rel) # convert Neo4j specific types to string rel_attributes = convert_types_to_string(rel_attributes) # assign name of the relationship if "name" not in rel_attributes: rel_attributes["name"] = rel.__class__.__name__ # assign unique node ids edge_instance.data["source"] = rel.start_node.identity edge_instance.data["target"] = rel.end_node.identity _set_attributes(edge_instance, rel_attributes) edge_list.append(edge_instance) # Neo4j graphs are directed and may have multiple edges directed = True multiple_edges = True self.add_edges(edge_list, directed, multiple_edges) class CytoscapeWidget(DOMWidget): """Implements the main Cytoscape Widget""" _model_name = Unicode("CytoscapeModel").tag(sync=True) _model_module = Unicode(module_name).tag(sync=True) _model_module_version = Unicode(module_version).tag(sync=True) _view_name = Unicode("CytoscapeView").tag(sync=True) _view_module = Unicode(module_name).tag(sync=True) _view_module_version = Unicode(module_version).tag(sync=True) # interaction options min_zoom = CFloat(1e-50).tag(sync=True) max_zoom = CFloat(1e50).tag(sync=True) zooming_enabled = Bool(True).tag(sync=True) user_zooming_enabled = Bool(True).tag(sync=True) panning_enabled = Bool(True).tag(sync=True) user_panning_enabled = Bool(True).tag(sync=True) box_selection_enabled = Bool(False).tag(sync=True) selection_type = CaselessStrEnum( ["single", "additive"], default_value="single" ).tag(sync=True) touch_tap_threshold = Integer(8).tag(sync=True) desktop_tap_threshold = Integer(4).tag(sync=True) autolock = Bool(False).tag(sync=True) auto_ungrabify = Bool(False).tag(sync=True) auto_unselectify = Bool(True).tag(sync=True) # rendering options headless = Bool(False).tag(sync=True) style_enabled = Bool(True).tag(sync=True) hide_edges_on_viewport = Bool(False).tag(sync=True) texture_on_viewport = Bool(False).tag(sync=True) motion_blur = Bool(False).tag(sync=True) motion_blur_opacity = CFloat(0.2).tag(sync=True) wheel_sensitivity = CFloat(1).tag(sync=True) cytoscape_layout = Dict({"name": "cola"}).tag(sync=True) pixel_ratio = Union([Unicode(), CFloat()], default_value="auto").tag(sync=True) cytoscape_style = List( [ {"selector": "node", "css": {"background-color": "#11479e"}}, {"selector": 
"node:parent", "css": {"background-opacity": 0.333}}, { "selector": "edge", "style": { "width": 4, "line-color": "#9dbaea", }, }, { "selector": "edge.directed", "style": { "curve-style": "bezier", "target-arrow-shape": "triangle", "target-arrow-color": "#9dbaea", }, }, { "selector": "edge.multiple_edges", "style": { "curve-style": "bezier", }, }, ] ).tag(sync=True) zoom = CFloat(2.0).tag(sync=True) rendered_position = Dict({"renderedPosition": {"x": 100, "y": 100}}).tag(sync=True) tooltip_source = Unicode("tooltip").tag(sync=True) _interaction_handlers = CytoInteractionDict({}).tag( sync=True, **interaction_serialization ) graph = Instance(Graph, args=tuple()).tag(sync=True, **widget_serialization) def __init__(self, graph=None, **kwargs): """ Initializes the graph widget. Parameters ---------- graph: graph: string, dict, pandas.DataFrame, networkx.Graph, neo4j.Graph, Graph object, optional The graph to initialize with. Equivalent to calling the appropriate ``CytoscapeWidget.graph.add_graph_from_` method. """ super().__init__(**kwargs) self.on_msg(self._handle_interaction) self.graph = Graph() if nx and isinstance(graph, nx.Graph): self.graph.add_graph_from_networkx(graph) elif isinstance(graph, (dict, str)): self.graph.add_graph_from_json(graph) elif pd and isinstance(graph, pd.DataFrame): self.graph.add_graph_from_df(graph, **kwargs) elif isinstance(graph, Graph): self.graph = graph elif py2neo and isinstance(graph, py2neo.Graph): self.graph.add_graph_from_neo4j(graph) # Make sure we have a callback dispatcher for this widget and event type; # since _interaction_handlers is synced with the frontend and changes to # mutable values don't automatically propagate, we need to explicitly set # the value of `_interaction_handlers` through the traitlet and allow the # serialized version to propagate to the frontend, where the client code # will add event handlers to the DOM graph. def on(self, widget_type, event_type, callback, remove=False): """ Register a callback to execute when the user interacts with the graph. Parameters ---------- widget_type : str Specify the widget type to monitor. Pick from: - %s event_type : str Specify the type of event to monitor. See documentation on these event types on the cytoscape documentation homepage, (https://js.cytoscape.org/#events/user-input-device-events). Pick from: - %s callback : func Callback to run in the kernel when the user has an `event_type` interaction with any element of type `widget_type`. `callback` will be called with one argument: the JSON-dictionary of the target the user interacted with (which includes a `data` key for the user-provided data in the node). remove : bool, optional Set to true to remove the callback from the list of callbacks. 
""" if widget_type not in self._interaction_handlers: self._interaction_handlers = dict( [ *self._interaction_handlers.items(), (widget_type, {event_type: CallbackDispatcher()}), ] ) elif event_type not in self._interaction_handlers[widget_type]: self._interaction_handlers = dict( [ *( (wt, v) for wt, v in self._interaction_handlers.items() if wt != widget_type ), ( widget_type, dict( [ *self._interaction_handlers[widget_type].items(), (event_type, CallbackDispatcher()), ] ), ), ] ) self._interaction_handlers[widget_type][event_type].register_callback( callback, remove=remove ) on.__doc__ = on.__doc__ % ( "\n - ".join(MONITORED_USER_TYPES), "\n - ".join(MONITORED_USER_INTERACTIONS), ) def _handle_interaction(self, _widget, content, _buffers): handlers = self._interaction_handlers if ( ("widget" in content) and ("event" in content) and (content["widget"] in handlers) and (content["event"] in handlers[content["widget"]]) ): handlers[content["widget"]][content["event"]](content["data"]) def set_layout(self, **kwargs): """ Sets the layout of the current object. Change the parameters individually. For extensive documentation on the different kinds of layout please refer to https://js.cytoscape.org/#layouts Parameters ---------- name : str name of the layout, ex.: cola, grid. nodeSpacing : int edgeLengthVal : int padding : int adds padding to the whole graph in comparison to the Jupyter's cell **kwargs : All kwargs will be added to the dictionary of the layout. """ dummy_dict = copy.deepcopy(self.cytoscape_layout) for key, value in kwargs.items(): dummy_dict[key] = value self.cytoscape_layout = dummy_dict def get_layout(self): """ Get the currently used layout. """ return self.cytoscape_layout def relayout(self): """ Cause the graph run the layout algorithm again. https://js.cytoscape.org/#cy.layout """ self.send({"name": "layout"}) def set_style(self, style): """ Sets the layout of the current object. Change the parameters with a dictionary. Parameters ---------- stylesheet: dict See https://js.cytoscape.org for layout examples. """ self.cytoscape_style = style def get_style(self): """ Gets the style of the current object. """ return self.cytoscape_style def set_tooltip_source(self, source): """ Parameters ---------- source : string The key in data that will be used to populate the tooltip """ self.tooltip_source = source ```
{ "source": "jenshnielsen/nanotune", "score": 2 }
#### File: nanotune/data/databases.py ```python import logging import ntpath import os from contextlib import contextmanager from typing import Dict, List, Optional, Tuple import qcodes as qc from qcodes.dataset.experiment_container import experiments from qcodes.dataset.sqlite.connection import atomic from qcodes.dataset.sqlite.database import connect from qcodes.dataset.sqlite.queries import add_meta_data, get_metadata from qcodes.dataset.sqlite.query_helpers import insert_column, many_many import nanotune as nt from nanotune.data.dataset import Dataset from nanotune.utils import flatten_list logger = logging.getLogger(__name__) def get_dataIDs( db_name: str, stage: str, db_folder: Optional[str] = None, quality: Optional[int] = None, ) -> List[int]: """""" if db_name[-2:] != "db": db_name += ".db" if db_folder is None: db_folder = nt.config["db_folder"] db_path = os.path.join(db_folder, db_name) conn = connect(db_path) if quality is None: sql = f""" SELECT run_id FROM runs WHERE {stage}={1} OR {stage} LIKE {str(1)} """ else: sql = f""" SELECT run_id FROM runs WHERE ({stage}={1} OR {stage} LIKE {str(1)}) AND (good={quality} OR good LIKE {str(quality)}) """ c = conn.execute(sql) param_names_temp = many_many(c, "run_id") return list(flatten_list(param_names_temp)) def get_unlabelled_ids( db_name: str, db_folder: Optional[str] = None, ) -> List[int]: """""" if db_name[-2:] != "db": db_name += ".db" if db_folder is None: db_folder = nt.config["db_folder"] db_path = os.path.join(db_folder, db_name) conn = connect(db_path) sql = f""" SELECT run_id FROM runs WHERE good IS NULL """ c = conn.execute(sql) param_names_temp = many_many(c, "run_id") return list(flatten_list(param_names_temp)) def list_experiments( db_folder: Optional[str] = None, ) -> Tuple[str, Dict[str, List[int]]]: """""" if db_folder is None: db_folder = nt.config["db_folder"] # print(os.listdir(db_folder)) # print(db_folder) # db_files = glob.glob(db_folder + '*.db') # db_files = glob.glob(db_folder) all_fls = os.listdir(db_folder) db_files = [db_file for db_file in all_fls if db_file.endswith(".db")] print("db_files: {}".format(db_files)) db_names = [ntpath.basename(path) for path in db_files] all_experiments = {} for idb, db_file in enumerate(db_files): qc.config["core"]["db_location"] = os.path.join(db_folder, db_file) exp_ids = [] exps = experiments() for exprmt in exps: exp_ids.append(exprmt.exp_id) all_experiments[db_names[idb]] = exp_ids return db_folder, all_experiments def new_database( db_name: str, db_folder: Optional[str] = None, ) -> str: """ Ceate new database and initialise it """ if db_folder is None: db_folder = nt.config["db_folder"] else: nt.config["db_folder"] = db_folder if db_name[-2:] != "db": db_name += ".db" path = os.path.join(db_folder, db_name) qc.initialise_or_create_database_at(path) nt.config["db_name"] = db_name # add label columns db_conn = connect(path) with atomic(db_conn) as conn: add_meta_data(conn, 0, {"original_guid": 0}) for label in nt.config["core"]["labels"]: add_meta_data(conn, 0, {label: 0}) return path def set_database( db_name: str, db_folder: Optional[str] = None, ) -> None: """""" if db_folder is None: db_folder = nt.config["db_folder"] else: nt.config["db_folder"] = db_folder if db_name[-2:] != "db": db_name += ".db" nt.config["db_name"] = db_name db_path = os.path.join(db_folder, db_name) qc.config["core"]["db_location"] = db_path # check if label columns exist, create if not db_conn = connect(db_path) with atomic(db_conn) as conn: try: get_metadata(db_conn, "0", 
nt.config["core"]["labels"][0]) except (RuntimeError, KeyError): for label in nt.config["core"]["labels"]: add_meta_data(conn, 0, {label: 0}) def get_database() -> Tuple[str, str]: """""" db_path = qc.config["core"]["db_location"] db_name = os.path.basename(db_path) db_folder = os.path.dirname(db_path) return db_name, db_folder def get_last_dataid( db_name: str, db_folder: Optional[str] = None, ) -> int: """ Return last 'global' dataid for given database. It is this ID that is used in plot_by_id """ if db_folder is None: db_folder = nt.config["db_folder"] if db_name[-2:] != "db": db_name += ".db" nt.set_database(db_name, db_folder=db_folder) last_index = 0 for experiment in experiments(): last_index += experiment.last_counter return last_index @contextmanager def switch_database(temp_db_name: str, temp_db_folder: str): """ """ original_db, original_db_folder = nt.get_database() nt.set_database(temp_db_name, db_folder=temp_db_folder) try: yield finally: nt.set_database(original_db, db_folder=original_db_folder) # def rename_labels(db_name: str, # db_folder: Optional[str] = nt.config['db_folder']) -> bool: # """ # """ # nt.set_database(db_name) # for data_id in range(1, nt.get_last_dataid(db_name)+1): # print(data_id) # nt.correct_label_names(data_id, db_name) # time.sleep(0.1) # if sqlite3.sqlite_version is 3.25 and above, we can use ALTER TABLE to # rename columns: # conn = connect(db_path) # sql = f""" # ALTER TABLE runs RENAME COLUMN wallwall TO leadscoupling; # """ # c = conn.execute(sql) # ALTER TABLE runs RENAME COLUMN wallwall TO leadcoupling; # ALTER TABLE runs RENAME COLUMN clmboscs TO coulomboscillations; # ALTER TABLE runs RENAME COLUMN clmbdiam TO coulombdiamonds; # ALTER TABLE runs RENAME COLUMN zbp TO zerobiaspeak; ``` #### File: nanotune/data/plotting.py ```python import copy import os from typing import Dict, List, Optional, Tuple, Union import matplotlib import matplotlib.pyplot as plt from cycler import cycler from matplotlib.colors import LinearSegmentedColormap, ListedColormap from mpl_toolkits.axes_grid1 import make_axes_locatable import nanotune as nt from nanotune.data.dataset import Dataset, default_coord_names AxesTuple = Tuple[matplotlib.axes.Axes, List[matplotlib.colorbar.Colorbar]] plot_params_type = Dict[str, Union[str, float, int, bool, List[float]]] default_plot_params: plot_params_type = { "backend": "ps", # 'text.latex.preamble': [r'\usepackage{gensymb}'], "image.origin": "lower", "axes.labelsize": 10, "axes.linewidth": 0.8, "axes.labelweight": 10, "axes.edgecolor": "grey", "axes.labelpad": 0.4, "xtick.direction": "out", "ytick.direction": "out", "xtick.labelsize": 10, "ytick.labelsize": 10, "legend.numpoints": 1, "legend.markerscale": 1, # 'legend.fontsize': 'x-small', # 'text.fontsize': 8, "font.size": 10, "lines.linewidth": 0.6, "lines.markersize": 5, "savefig.dpi": 300, "axes.grid": False, "image.interpolation": "nearest", "text.usetex": False, "legend.fontsize": 10, "legend.labelspacing": 0.5, "legend.framealpha": 0.8, "figure.figsize": [7.0, 5.0], "font.family": "serif", # 'pcolor.shading': 'auto, } lightblue = "#6699CC" # (255, 153, 204) blue = "#336699" # (51, 102, 153) darkblue = "#264D73" # (38, 77, 115) cyan = "#33BBEE" # (51, 187, 238) lightteal = "#00E6E6" # (0, 230, 230) teal = "#009988" # (0, 153, 136) darkteal = "#006666" # (0, 102, 102) orange = "#EE7733" # (238, 119, 51) lightred = "#FF531A" # (255, 83, 26) red = "#CC3311" # (204, 51, 17) darkred = "#802000" # (128, 32, 0) magenta = "#EE3377" # (238, 51, 119) grey = "#BBBBBB" # (187, 187, 
187) custom_cm = LinearSegmentedColormap.from_list( "vivid_colorblind", [darkblue, cyan, teal, red, orange] ) plt.register_cmap(cmap=custom_cm) matplotlib.rcParams["image.cmap"] = "vivid_colorblind" colors = [ blue, red, cyan, orange, teal, lightblue, lightteal, lightred, darkblue, darkteal, darkred, grey, ] matplotlib.rcParams["axes.prop_cycle"] = cycler(color=colors) # ['#003399', '#FF6633', '#996699', '#99CCFF', '#EE442F', '#F4D4D4', # '#63ACBE', '#9C9EB5', '#FDF0F2', '#ABC3C9']) colors_dict = { "lightblue": lightblue, "blue": blue, "darkblue": darkblue, "cyan": cyan, "lightteal": lightteal, "teal": teal, "darkteal": darkteal, "orange": orange, "lightred": lightred, "red": red, "darkred": darkred, "magenta": magenta, "grey": grey, } def plot_dataset( qc_run_id: int, db_name: str, save_figures: bool = True, db_folder: Optional[str] = None, plot_filtered_data: bool = False, plot_params: Optional[plot_params_type] = None, ax: Optional[matplotlib.axes.Axes] = None, colorbar: Optional[matplotlib.colorbar.Colorbar] = None, filename: Optional[str] = None, file_location: Optional[str] = None, ) -> AxesTuple: """ If to be saved and no file location specified, the figure will be saved at os.path.join(nt.config['db_folder'], 'tuning_results', dataset.device_name) in both eps and png """ if plot_params is None: plot_params = default_plot_params matplotlib.rcParams.update(plot_params) if db_folder is None: _, db_folder = nt.get_database() dataset = Dataset(qc_run_id, db_name, db_folder=db_folder) if plot_filtered_data: data = dataset.filtered_data else: data = dataset.data if ax is None: fig_size = copy.deepcopy(plot_params["figure.figsize"]) fig_size[1] *= len(dataset.data) * 0.8 # type: ignore fig, ax = plt.subplots( len(dataset.data), 1, squeeze=False, figsize=fig_size, ) colorbars: List[matplotlib.colorbar.Colorbar] = [] fig_title = dataset.guid for r_i, read_meth in enumerate(dataset.readout_methods): c_name = default_coord_names["voltage"][0] voltage_x = data[read_meth][c_name].values signal = data[read_meth].values.T if dataset.dimensions[read_meth] == 1: colorbar = None ax[r_i, 0].plot( voltage_x, signal, zorder=6, ) ax[r_i, 0].set_xlabel(dataset.get_plot_label(read_meth, 0)) ax[r_i, 0].set_ylabel(dataset.get_plot_label(read_meth, 1)) ax[r_i, 0].set_title(str(fig_title)) divider = make_axes_locatable(ax[r_i, 0]) cbar_ax = divider.append_axes("right", size="5%", pad=0.06) cbar_ax.set_facecolor("none") for caxis in ["top", "bottom", "left", "right"]: cbar_ax.spines[caxis].set_linewidth(0) cbar_ax.set_xticks([]) cbar_ax.set_yticks([]) colorbars.append(colorbars) ax[r_i, 0].figure.tight_layout() elif dataset.dimensions[read_meth] == 2: c_name = default_coord_names["voltage"][1] voltage_y = data[read_meth][c_name].values colormesh = ax[r_i, 0].pcolormesh( voltage_x, voltage_y, signal, shading="auto", ) if colorbar is not None: colorbars.append( ax[r_i, 0].figure.colorbar( colormesh, ax=ax[r_i, 0], cax=colorbar.ax ) ) else: # colorbar = fig.colorbar(colormesh, ax=ax[r_i, 0]) divider = make_axes_locatable(ax[r_i, 0]) cbar_ax = divider.append_axes("right", size="5%", pad=0.06) colorbars.append( fig.colorbar( colormesh, ax=ax[r_i, 0], cax=cbar_ax, ) ) colorbars[-1].set_label( dataset.get_plot_label(read_meth, 2), rotation=-270, ) ax[r_i, 0].set_xlabel(dataset.get_plot_label(read_meth, 0)) ax[r_i, 0].set_ylabel(dataset.get_plot_label(read_meth, 1)) ax[r_i, 0].set_title(str(fig_title)) ax[r_i, 0].figure.tight_layout() else: raise NotImplementedError if save_figures: if file_location is None: 
file_location = os.path.join( nt.config["db_folder"], "tuning_results", dataset.device_name ) if not os.path.exists(file_location): os.makedirs(file_location) if filename is None: filename = "dataset_" + str(dataset.guid) else: filename = os.path.splitext(filename)[0] path = os.path.join(file_location, filename + ".png") plt.savefig(path, format="png", dpi=600, bbox_inches="tight") return ax, colorbars ``` #### File: nanotune/device_tuner/dottuner.py ```python import copy import logging from typing import Any, Dict, List, Optional, Tuple import numpy as np import qcodes as qc from qcodes import validators as vals from qcodes.dataset.experiment_container import (load_experiment, load_last_experiment) import nanotune as nt from nanotune.classification.classifier import Classifier from nanotune.device.device import Device as Nt_Device from nanotune.device.device_channel import DeviceChannel from nanotune.device_tuner.tuner import Tuner from nanotune.device_tuner.tuningresult import MeasurementHistory, TuningResult from nanotune.fit.pinchofffit import PinchoffFit from nanotune.tuningstages.chargediagram import ChargeDiagram from nanotune.utils import flatten_list logger = logging.getLogger(__name__) DATA_DIMS = { "gatecharacterization1d": 1, "chargediagram": 2, "coulomboscillations": 1, } class DotTuner(Tuner): """ classifiers = { 'pinchoff': Optional[Classifier], 'singledot': Optional[Classifier], 'doubledot': Optional[Classifier], 'dotregime': Optional[Classifier], } data_settings = { 'db_name': str, 'db_folder': Optional[str], 'qc_experiment_id': Optional[int], 'segment_db_name': Optional[str], 'segment_db_folder': Optional[str], } setpoint_settings = { 'voltage_precision': float, } """ def __init__( self, name: str, data_settings: Dict[str, Any], classifiers: Dict[str, Classifier], setpoint_settings: Dict[str, Any], ) -> None: super().__init__( name, data_settings, classifiers, setpoint_settings, ) self._tuningresults_all: Dict[str, MeasurementHistory] = {} def tune( self, device: Nt_Device, desired_regime: str = "doubledot", max_iter: int = 100, take_high_res: bool = False, ) -> MeasurementHistory: """""" assert "singledot" in self.classifiers.keys() assert "doubledot" in self.classifiers.keys() assert "dotregime" in self.classifiers.keys() if device.name not in self._tuningresults_all.keys(): self._tuningresults_all[device.name] = MeasurementHistory(device.name) if self.qcodes_experiment.sample_name != Nt_Device.name: logger.warning( "The device's name does match the" + " the sample name in qcodes experiment." 
) device.all_gates_to_highest() self.update_normalization_constants(device) self.set_top_barrier(device) result = self.tune_1D( device, desired_regime=desired_regime, max_iter=max_iter, set_barriers=True, take_high_res=take_high_res, ) return result def set_top_barrier( self, device: Nt_Device, ) -> None: """ """ if device.initial_valid_ranges()[0] == device.gates[0].safety_range(): (top_barrier_ranges, measurement_result) = self.measure_initial_ranges( device.gates[0], [device.gates[1], device.gates[5]] ) self._tuningresults_all[device.name].update(measurement_result) else: top_barrier_ranges = device.initial_valid_ranges()[0] # L, H = top_barrier_ranges # tb_voltage = 0.5 * (H + T) device.gates[0].current_valid_range(top_barrier_ranges) device.gates[0].voltage(top_barrier_ranges[1]) def tune_1D( self, device: Nt_Device, desired_regime: str, max_iter: int = 100, set_barriers: bool = True, take_high_res: bool = False, reset_gates: bool = False, continue_tuning: bool = False, ) -> Dict[Any, Any]: """Does not reset any tuning""" if device.name not in self._tuningresults_all.keys(): self._tuningresults_all[device.name] = MeasurementHistory(device.name) if reset_gates: device.all_gates_to_highest() done = False if set_barriers: self.set_central_barrier(device, desired_regime=desired_regime) success, new_action = self.set_outer_barriers(device) while not success: self.update_top_barrier(device, new_action) success, new_action = self.set_outer_barriers(device) plungers = [device.left_plunger, device.right_plunger] n_iter = 0 while not done and n_iter <= max_iter: n_iter += 1 good_barriers = False while not good_barriers: # Narrow down plunger ranges: success, barrier_actions = self.set_new_plunger_ranges(device) if not success: out_br_success, new_action = self.update_barriers( device, barrier_actions ) if not out_br_success: logger.info( ( "Outer barrier reached safety limit. 
" "Setting new top barrier.\n" ) ) self.update_top_barrier(device, new_action) success, new_action = self.set_outer_barriers(device) while not success: self.update_top_barrier(device, new_action) (success, new_action) = self.set_outer_barriers(device) else: good_barriers = True tuningresult = self.get_charge_diagram( device, plungers, signal_thresholds=[0.004, 0.1], ) self._tuningresults_all[device.name].add_result( tuningresult, f"chargediagram_{n_iter}" ) logger.info( ( f"ChargeDiagram stage finished: {tuningresult.success}\n" f"termination_reason: {tuningresult.termination_reasons}" ) ) segment_info = tuningresult.features["segment_info"] if desired_regime in segment_info[:, 2]: logger.info("Desired regime found.") done = True if done and take_high_res: logger.info("Take high resolution of entire charge diagram.") tuningresult = self.get_charge_diagram( device, plungers, voltage_precision=0.0005, ) self._tuningresults_all[device.name].add_result( tuningresult, f"chargediagram_highres_{n_iter}" ) logger.info("Take high resolution of charge diagram segments.") segment_info = tuningresult.features["segment_info"] for sid, segment in enumerate(segment_info): if segment[2] == desired_regime: readout_meth = segment[1].keys()[0] v_ranges = segment[1][readout_meth] axis_keys = ["range_x", "range_y"] for plunger, key in zip(plungers, axis_keys): plunger.current_valid_range(v_ranges[key]) tuningresult = self.get_charge_diagram( device, plungers, voltage_precision=0.0001, ) self._tuningresults_all[device.name].add_result( tuningresult, f"chargediagram_{n_iter}_segment_{sid}" ) if continue_tuning: done = False logger.warning("Continue tuning regardless of the outcome.") if not done: self.update_gate_configuration( device, tuningresult, desired_regime, ) logger.info( ( "Continuing with new configuration: " f"{device.get_gate_status()}." ) ) if n_iter >= max_iter: logger.info(f"Tuning {device.name}: Max number of iterations reached.") return self._tuningresults_all[device.name] def update_top_barrier( self, device: Nt_Device, new_action: str, ) -> None: """ """ new_top = self.choose_new_gate_voltage( device, 0, new_action, range_change=0.1, max_range_change=0.05, min_range_change=0.01, ) device.top_barrier.voltage(new_top) logger.info( ( f"Setting top barrier to {new_top}" "Characterize and set outer barriers again.\n" ) ) def update_barriers( self, device: Nt_Device, barrier_actions: Dict[int, str], range_change: float = 0.1, ) -> Tuple[bool, str]: """ """ success = True for gate_layout_id, actions in barrier_actions.items(): for action in actions: new_voltage = self.choose_new_gate_voltage( device, gate_layout_id, action, range_change=range_change, max_range_change=0.05, min_range_change=0.01, ) safe_range = device.gates[gate_layout_id].safety_range() touching_limits = np.isclose(new_voltage, safe_range, atol=0.1) if any(touching_limits): success = False if touching_limits[0]: # Some other gate needs to be set more negative new_action = "more negative" else: new_action = "more positive" else: device.gates[gate_layout_id].voltage(new_voltage) logger.info( ( "Choosing new voltage range for" f"{device.gates[gate_layout_id]}: {new_voltage}" ) ) return success, new_action def set_outer_barriers( self, device: Nt_Device, ) -> Tuple[bool, str]: """ Will not update upper valid range limit. We assume it has been determined in the beginning with central_barrier = 0 V and does not change. 
new_voltage = T + 2 / 3 * abs(T - H) """ # (barrier_values, success) = self.get_outer_barriers() result = self.characterize_gates( device, gates=[device.left_barrier, device.right_barrier], comment="Characterize outer barriers before setting them.", use_safety_ranges=True, ) self._tuningresults_all[device.name].update(result) success = True for barrier in [device.left_barrier, device.right_barrier]: key = f"characterization_{barrier.name}" features = result.tuningresults[key].features L, H = features["low_voltage"], features["high_voltage"] T = features["transition_voltage"] new_voltage = T + 2 / 3 * abs(T - H) curr_sft = barrier.safety_range() touching_limits = np.isclose(new_voltage, curr_sft, atol=0.005) if any(touching_limits): success = False if touching_limits[0]: new_action = "more negative" else: new_action = "more positive" else: barrier.voltage(new_voltage) barrier.current_valid_range((L, H)) barrier.transition_voltage(T) logger.info(f"Setting {barrier.name} to {new_voltage}.") return success, new_action def set_central_barrier( self, device: Nt_Device, desired_regime: str = "doubledot", ) -> None: """""" setpoint_settings = copy.deepcopy(self.setpoint_settings()) setpoint_settings["gates_to_sweep"] = [device.central_barrier] result = self.characterize_gates( device, gates=[device.central_barrier], use_safety_ranges=True, ) self._tuningresults_all[device.name].update(result) key = f"characterization_{device.central_barrier.name}" features = result.tuningresults[key].features if desired_regime == 1: device.central_barrier.voltage(features["high_voltage"]) elif desired_regime == 3: data_id = result.tuningresults[key].data_ids[-1] ds = nt.Dataset( data_id, self.data_settings["db_name"], db_folder=self.data_settings["db_folder"], ) read_meths = device.readout_methods().keys() if "transport" in read_meths: signal = ds.data["transport"].values voltage = ds.data["transport"]["voltage_x"].values elif "sensing" in read_meths: signal = ds.data["sensing"].values voltage = ds.data["sensing"]["voltage_x"].values elif "rf" in read_meths: signal = ds.data["rf"].values voltage = ds.data["rf"]["voltage_x"].values else: raise ValueError("Unknown readout method.") v_sat_idx = np.argwhere(signal < float(2 / 3))[-1][0] three_quarter = voltage[v_sat_idx] device.central_barrier.voltage(three_quarter) else: raise ValueError("Unknown desired regime.") device.central_barrier.current_valid_range( [features["low_voltage"], features["high_voltage"]] ) logger.info(f"Central barrier to {device.central_barrier.voltage()}") def update_gate_configuration( self, device: Nt_Device, last_result: TuningResult, desired_regime: str, # range_change: float = 0.1, # min_change: float = 0.01, ) -> None: """ Choose new gate voltages when previous tuning did not result in any good regime. """ termination_reasons = last_result.termination_reasons if not termination_reasons and not last_result.success: raise ValueError( ( "Unknown tuning outcome. Expect either a successful " "tuning stage or termination reasons" ) ) if not termination_reasons: # A good regime was found, but not the right one. 
Change central # barrier to get there if desired_regime == 1: action = "more positive" else: action = "more negative" success, new_action = self.update_barriers( device, {device.central_barrier.layout_id(): action}, range_change=0.05, ) while not success: self.update_top_barrier(device, new_action) self.set_central_barrier(device, desired_regime=desired_regime) success, new_action = self.set_outer_barriers(device) else: # the charge diagram terminated unsuccessfully: no plunger ranges # were found to give a good diagram. The device might be too # pinched off or too open. all_actions = [ "x more negative", "x more positive", "y more negative", "y more positive", ] gates_to_change = [ device.left_barrier, device.left_barrier, device.right_barrier, device.right_barrier, ] barrier_actions = {} for action, gate in zip(all_actions, gates_to_change): if action in termination_reasons: barrier_actions[gate.layout_id()] = action[2:] success, new_action = self.update_barriers( device, barrier_actions, ) while not success: self.update_top_barrier(device, new_action) self.set_central_barrier(device, desired_regime=desired_regime) success, new_action = self.set_outer_barriers(device) def choose_new_gate_voltage( self, device: Nt_Device, layout_id: int, action: str, range_change: float = 0.5, max_range_change: float = 0.1, min_range_change: float = 0.05, ) -> float: """ based on current_valid_range or safety_range if no current_valid_range is set """ curr_v = device.gates[layout_id].voltage() sfty_rng = device.gates[layout_id].safety_range() try: L, H = device.gates[layout_id].current_valid_range() except ValueError: L, H = device.gates[layout_id].safety_range() if action == "more negative": v_change = max(range_change * abs(curr_v - L), min_range_change) v_change = min(max_range_change, v_change) new_v = curr_v - v_change if new_v < sfty_rng[0]: new_v = sfty_rng[0] elif action == "more positive": v_change = max(range_change * abs(curr_v - H), min_range_change) v_change = min(max_range_change, v_change) new_v = curr_v + v_change if new_v > sfty_rng[1]: new_v = sfty_rng[1] else: raise ValueError("Invalid action in choose_new_gate_voltage") return new_v def set_new_plunger_ranges( self, device: Nt_Device, noise_floor: float = 0.02, open_signal: float = 0.1, ) -> Tuple[bool, Dict[int, str]]: """ noise_floor and open_signal compared to normalised signal checks if barriers need to be adjusted, depending on min and max signal """ result = self.characterize_gates( device, gates=[device.left_plunger, device.right_plunger], use_safety_ranges=True, ) plunger_barrier_pairs = { device.left_plunger: device.left_barrier, device.right_plunger: device.right_barrier, } self._tuningresults_all[device.name].update(result) barrier_actions: Dict[int, str] = {} success = True for plunger, barrier in plunger_barrier_pairs.items(): key = f"characterization_{plunger.name}" features = result.tuningresults[key].features max_sig = features["max_signal"] min_sig = features["min_signal"] low_voltage = features["low_voltage"] high_voltage = features["high_voltage"] new_range = (low_voltage, high_voltage) plunger.current_valid_range(new_range) logger.info(f"{plunger.name}: new current valid range set to {new_range}") if min_sig > open_signal: barrier_actions[barrier.layout_id()] = "more negative" success = False if max_sig < noise_floor: barrier_actions[barrier.layout_id()] = "more positive" success = False return success, barrier_actions def get_charge_diagram( self, device: Nt_Device, gates_to_sweep: List[DeviceChannel], voltage_precision: 
Optional[float] = None, signal_thresholds: Optional[List[float]] = None, iterate: bool = False, comment: str = "", ) -> TuningResult: """ stage.segment_info = ((data_id, ranges, category)) """ required_clf = ["singledot", "doubledot", "dotregime"] for clf in required_clf: if clf not in self.classifiers.keys(): raise KeyError(f"No {clf} classifier found.") setpoint_settings = copy.deepcopy(self.setpoint_settings()) setpoint_settings["parameters_to_sweep"] = gates_to_sweep setpoint_settings["voltage_precision"] = voltage_precision with self.device_specific_settings(device): stage = ChargeDiagram( data_settings=self.data_settings, setpoint_settings=setpoint_settings, readout_methods=device.readout_methods(), classifiers=self.classifiers, ) tuningresult = stage.run_stage(iterate=iterate) tuningresult.status = device.get_gate_status() tuningresult.comment = comment return tuningresult ``` #### File: nanotune/labelling/manipulate_labels.py ```python import logging import os from typing import Optional import qcodes as qc from qcodes.dataset.experiment_container import load_by_id from qcodes.dataset.plotting import plot_by_id import nanotune as nt logger = logging.getLogger(__name__) LABELS = list(dict(nt.config["core"]["labels"]).keys()) def correct_label( dataid: int, db_name: str, new_stage: str, new_quality: int, db_folder: Optional[str] = None, ) -> bool: if db_folder is None: db_folder = nt.config["db_folder"] if db_name[-2:] != "db": db_name += ".db" db_folder = os.path.join(nt.config["db_folder"], db_name) qc.config["core"]["db_location"] = db_folder ds = load_by_id(dataid) if new_stage not in LABELS: logger.error("Wrong tuning stage. Leaving old label.") return False else: new_label = dict.fromkeys(LABELS, 0) new_label[new_stage] = 1 new_label["good"] = int(new_quality) for label, value in new_label.items(): ds.add_metadata(label, value) return True def print_label( dataid: int, db_name: str, db_folder: Optional[str] = None, plot_data: Optional[bool] = True, ) -> None: """""" if db_folder is None: db_folder = nt.config["db_folder"] ds = load_by_id(dataid) if plot_data: plot_by_id(dataid) print("dataset {} in {}: ".format(dataid, db_name)) quality_mapping = {1: "good", 0: "poor"} for label in LABELS: if label != "good": if int(ds.get_metadata(label)) == 1: quality = quality_mapping[int(ds.get_metadata("good"))] print("{} {}.".format(quality, label)) ``` #### File: nanotune/model/node.py ```python import logging from typing import Optional import qcodes as qc from qcodes import Instrument, InstrumentChannel, Parameter from qcodes import validators as vals from qcodes.instrument.base import InstrumentBase from qcodes.utils.validators import Validator import nanotune as nt logger = logging.getLogger(__name__) class Node(InstrumentChannel): def __init__( self, parent: InstrumentBase, name: str = "node", label: str = "", node_type: Optional[str] = None, n_init: int = 0, v_init: float = 0, ): pass super().__init__(parent, name) self.add_parameter( "node_type", label="node type " + label, unit=None, get_cmd=None, set_cmd=None, initial_value=node_type, vals=vals.Strings(), ) self.add_parameter( "n", label="number of charges " + label, unit=None, set_cmd=self._set_n, get_cmd=self._get_n, initial_value=n_init, vals=vals.Ints(-100000, 100000), ) self.add_parameter( "v", label="voltage node " + label, unit="V", set_cmd=self._set_v, get_cmd=self._get_v, initial_value=v_init, vals=vals.Numbers(-100000, 100000), ) def _set_n(self, new_N: int) -> None: self._n = new_N def _get_n(self) -> int: return 
self._n def _set_v(self, new_V: int) -> None: self._v = new_V def _get_v(self) -> float: return self._v ``` #### File: nanotune/nanotune/utils.py ```python import json import logging import os from typing import Any, Dict, List, Optional, Tuple, no_type_check import matplotlib.pyplot as plt import numpy as np from qcodes.dataset.experiment_container import load_by_id import nanotune as nt logger = logging.getLogger(__name__) def get_path(filename: str) -> str: """ Get path to file, which is expected to be in nt.config['db_folder'] """ return os.path.join(nt.config["db_folder"], filename) @no_type_check def flatten_list(nested_list: List[Any]) -> Any: """ Returns an operator/iterator iterating through a flattened list """ for i in nested_list: if isinstance(i, (list, tuple)): for j in flatten_list(i): yield j else: yield i def get_recursively( search_dict: Dict[str, Any], parameter_name: str, value_field: str = "value", ) -> List[str]: """ Recursively searches a QCoDeS metadata dict for the value of a given parameter. """ param_values = [] for key, value in search_dict.items(): if value == parameter_name: param_values.append(search_dict[value_field]) elif isinstance(value, dict): results = get_recursively(value, parameter_name) for result in results: param_values.append(result) elif isinstance(value, list): for item in value: if isinstance(item, dict): more_results = get_recursively(item, parameter_name) for another_result in more_results: param_values.append(another_result) return param_values def get_param_values( qc_run_id: int, db_name: str, db_folder: Optional[str] = None, return_meta_add_on: Optional[bool] = False, ) -> Tuple[List[List[str]], List[List[str]]]: """""" if db_folder is None: db_folder = nt.config["db_folder"] nt.set_database(db_name) ds = load_by_id(qc_run_id) nt_metadata = json.loads(ds.get_metadata(nt.meta_tag)) snapshot = json.loads(ds.get_metadata("snapshot"))["station"] device_name = nt_metadata["device_name"] device_snap = snapshot["instruments"][device_name] submods = device_snap["submodules"].keys() param_values = [["Parameter", "Value"]] for submod in submods: gate_val = device_snap["submodules"][submod]["parameters"] try: gate_val = gate_val["voltage"]["value"] except KeyError: gate_val = gate_val["state"]["value"] param_values.append([submod, gate_val]) features = [] if return_meta_add_on: features = [["Feature", "Value"]] param_values.append(["db_name", db_name]) param_values.append(["guid", ds.guid]) for name, v in nt_metadata.items(): if name == "elapsed_time" and v is not None: m, s = divmod(v, 60) h, m = divmod(m, 60) v = "{}h {}min {}s".format(h, m, s) param_values.append([name, v]) elif name == "features": for fname, fval in v.items(): features.append([fname, fval]) elif name == "normalization_constants": param_values.append(["dc normalization", str(v["transport"])]) param_values.append(["rf normalization", str(v["rf"])]) else: if type(v) == list: param_values.append([name, *v]) else: param_values.append([name, v]) return param_values, features def format_axes(axes: List[plt.Axes], color: str, linewidth: float): for ax in axes: for spine in ["top", "right"]: ax.spines[spine].set_visible(False) for spine in ["left", "bottom"]: ax.spines[spine].set_color(color) ax.spines[spine].set_linewidth(linewidth) ax.xaxis.set_ticks_position("bottom") ax.yaxis.set_ticks_position("left") for axis in [ax.xaxis, ax.yaxis]: axis.set_tick_params(direction="out", color=color) return axes ``` #### File: sim/data_providers/static_data_provider.py ```python import os from 
typing import Any, Optional, Sequence, Union import qcodes as qc import xarray as xr from scipy import interpolate from sim.data_provider import DataProvider class StaticDataProvider(DataProvider): """Data provider that returns a constant value for all inputs.""" @classmethod def make(cls, **kwargs) -> Any: """ ISerializable override to create an instance of this class """ return cls(kwargs["value"]) def __init__(self, value: float) -> None: super().__init__(settable=True) self._value = value def __call__(self, *args) -> float: return self._value def get_value(self) -> float: """The current value of this data provider""" return self._value def set_value(self, value: float) -> None: """Set the static value of this data provider""" self._value = value @property def raw_data(self) -> xr.DataArray: """Returns the raw data backing this provider as an xarray.DataArray """ return xr.DataArray("0", dims="x", coords={"x": [1]}) ``` #### File: sim/data_providers/synthetic_pinchoff_data_provider.py ```python import os from typing import Any, Optional, Sequence, Union import numpy as np import xarray as xr from sim.data_provider import DataProvider from sim.mock_device_registry import MockDeviceRegistry from sim.mock_pin import IMockPin class SyntheticPinchoffDataProvider(DataProvider): """ Simulates pinchoff curves using hyperbolic tangent funtion. Inputs: min, max : controls the lower and upper bounds of output center : Defines x value where the center of the curve will be located width : Defines the overall width of the curve upper_tilt : If non-zero the upper half of the function will follow a linear slope instead of flattening out at the max value lower_tilt : If non-zero the lower half of the function will follow a linear slope instead of flattening out at the max value noise : If non-zero, gaussian noise will be added. The value indicates the amplitude of the noise in terms of percentage of the height of the curve (max - min). e.g. 0.03 would indicate 3% of the height of the curve flip : If False, the curve will rise with increasing x values. If True, the curve will fall with increasing x values. """ @classmethod def make(cls, **kwargs): src_pin = MockDeviceRegistry.resolve_pin(kwargs["src_pin"]) min : float = kwargs["min"] max : float = kwargs["max"] center : float = kwargs["center"] width : float = kwargs["width"] upper_tilt : float = kwargs.get("upper_tilt", 0.0) lower_tilt : float = kwargs.get("lower_tilt", 0.0) noise : float = kwargs.get("noise", 0.0) flip : bool = kwargs.get("flip", False) raw_data_samples : int = kwargs.get("raw_data_samples", 500) return cls(src_pin, min, max, center, width, upper_tilt, lower_tilt, flip, raw_data_samples) def __init__( self, src_pin : IMockPin, min : float, max : float, center : float, width : float, upper_tilt : float = 0.0, lower_tilt : float = 0.0, noise : float = 0.0, # % of height flip : bool = False, raw_data_samples : int = 500 ): super().__init__(settable = False) self._source = src_pin self._min = min self._max = max self._center = center self._width = width self._upper_tilt = upper_tilt self._lower_tilt = lower_tilt self._noise = noise self._flip = flip self._raw_data_samples = raw_data_samples def get_value(self) -> float: return self.compute(self._source.get_value()) def set_value(self, value : float) -> None: """Raises NotImplementedError. 
This data provider type is read only""" raise NotImplementedError def compute(self, x : float) -> float: """ Compute the result from a simulated pinchoff curve using the configured inputs """ height : float = self._max - self._min sign : float = -1.0 if self._flip else 1.0 a : float = 6.4 / self._width b : float = a * self._center c : float = 2 / height d : float = self._min + (0.5 * height) t : float = 0.0 value = np.tanh((sign*a*x) - (sign*b))/c + d t = self._upper_tilt is_upper_half = ((x > self._center and not self._flip) or (x < self._center and self._flip)) if t and is_upper_half: value = value + (np.abs(x-self._center)/t) t = self._lower_tilt if t and not is_upper_half: value = value - (np.abs(x-self._center)/t) if (self._noise): value = value + np.random.normal() * height * self._noise return value @property def raw_data(self) -> Union[xr.DataArray, xr.Dataset]: """ Returns a snapshot of the function consisting of configured number of samples spanning the 2x the specified width around the center of the curve. """ x = np.linspace(self._center - self._width, self._center + self._width, self._raw_data_samples) y = [self.compute(val) for val in x] return xr.DataArray(y, dims=["x"], coords={"x": x}) ``` #### File: sim/mock_devices/mock_pin.py ```python from sim.data_provider import IDataProvider from sim.data_providers import StaticDataProvider from sim.mock_pin import IMockPin class MockPin(IMockPin): """Default Pin Device. Uses an IDataProvider to represent the value on the pin device """ def __init__(self, name: str) -> None: self._name = name self._data_provider: IDataProvider = StaticDataProvider(0.0) def __repr__(self) -> str: return self._name def __str__(self) -> str: return self._name @property def name(self) -> str: """Name of the pin""" return self._name def get_value(self) -> float: """Gets the current value on the input pin. Compatible with qcodes Parameter get_cmd argument. """ return self._data_provider.get_value() def set_value( self, value: float, ) -> None: """Set the value on the pin. Raises an error if the data provider backing this pin is read only """ self._data_provider.set_value(value) def set_data_provider( self, data_provider: IDataProvider, ) -> None: """change the data provider backing this pin""" self._data_provider = data_provider @property def settable(self) -> bool: """Indictates whether the value of this pin in settable or not""" return self._data_provider.settable ``` #### File: nanotune/sim/qcodes_utils.py ```python import logging import qcodes as qc class QcodesDbConfig: """Context Manager for temporarily switching the qcodes database to another """ def __init__(self, qcodes_db_path: str) -> None: """Initializes a QCoDeS database configurator. Args: qcodes_db_path: Path to database. 
""" self._db_path = qcodes_db_path self._orig_db_path = qc.config["core"]["db_location"] def __enter__(self) -> None: self._orig_db_path = qc.config["core"]["db_location"] qc.config["core"]["db_location"] = self._db_path logging.info("Changed qcodes db to %s", self._db_path) def __exit__(self, *exc) -> None: qc.config["core"]["db_location"] = self._orig_db_path logging.info("Restoring qcodes db to %s", self._orig_db_path) def dump_db(db_path: str) -> None: """ Utility method to dump the experiments, datasets, and parameters from a qcodes database """ # Some test code to get started with QcodesDbConfig(db_path): print("Path to DB : {0}".format(db_path)) for exp in qc.experiments(): print("Experiment name : {0}".format(exp.name)) print("dataset count : {0}".format(len(exp.data_sets()))) print("first dataset : {0}".format(exp.data_sets()[0].run_id)) last_id = exp.last_data_set().run_id print("last dataset : {0}".format(last_id)) print("") for dataset in exp.data_sets(): msg = [f"{p.label}({p.unit})" for p in dataset.get_parameters()] params = ", ".join(msg) print(f"{dataset.captured_run_id}: {dataset.exp_name} {params}") def enum_datasets(db_path): # Some test code to get started with QcodesDbConfig(db_path): print("Path to DB : {0}".format(db_path)) for exp in qc.experiments(): count = len(exp.data_sets()) print("Experiment name : {0}".format(exp.name)) print("dataset count : {0}".format(count)) if (count): print("first dataset : {0}".format(exp.data_sets()[0].run_id)) last_id = exp.last_data_set().run_id print("last dataset : {0}".format(last_id)) print("") for dataset in exp.data_sets(): msg = [f"{p.name} {p.type} : \"{p.label}\" ({p.unit})" for p in dataset.get_parameters()] params = " || ".join(msg) print(f"{dataset.captured_run_id}: {dataset.exp_name} {params}") yield dataset ``` #### File: sim/test/test_qcodes_mocks.py ```python from sim.mock_devices import MockDevice from sim.qcodes_mocks import MockDeviceInstrument, SimulationParameter from sim.mock_devices import MockPin def test_simulation_parameter_with_init(): """Verifies that a SimulationParameter instance can be properly initialized by QCoDeS when adding a parameter to an instrument with a default initial_value""" class TestDevice(MockDevice): def __init__(self): super().__init__( "mock_device", [ MockPin("pin") ] ) @property def pin(self): return self["pin"] class TestInstrument(MockDeviceInstrument): def __init__(self): super().__init__("mock_instr", TestDevice()) self.add_parameter( "src", parameter_class=SimulationParameter, unit="V", sim_pin=self.mock_device.pin, initial_value = 1.23 ) test_inst = TestInstrument() assert test_inst.src() == 1.23, "The Test Instrument failed to initialize the src parameter value" ```
{ "source": "jenshnielsen/pyvisa_mock", "score": 3 }
#### File: visa_mock/base/base_mocker.py ```python from inspect import signature from typing import Dict, Callable, Any, get_type_hints import re __tmp_scpi_dict__: Dict[str, Callable] = {} def cast_from_annotation(function): """ Cast function arguments to types given in the hints provided in the type annotation. Example: >>> @cast_from_annotation ... def f(x: int, y: float): ... return x + y >>> f("2", "3.4") ... 5.4 """ parameters = dict(signature(function).parameters) annotations = {} if "self" in parameters: annotations["self"] = lambda x: x annotations.update(get_type_hints(function)) annotations.pop("return", None) if len(annotations) != len(parameters): raise ValueError( "This decorator requires all arguments to be annotated" ) def inner(*args, **kwargs): kwargs.update(dict(zip(annotations, args))) new_kwargs = { name: annotations[name](value) for name, value in kwargs.items() } return function(**new_kwargs) return inner class MockingError(Exception): pass class MockerMetaClass(type): """ We need a custom metaclass as right after class declaration we need to modify class attributes: The `__scpi_dict__` needs to be populated """ def __new__(cls, *args, **kwargs): mocker_class = super().__new__(cls, *args, **kwargs) mocker_class.__scpi_dict__ = dict(__tmp_scpi_dict__) names = list(__tmp_scpi_dict__.keys()) for name in names: __tmp_scpi_dict__.pop(name) return mocker_class class BaseMocker(metaclass=MockerMetaClass): __scpi_dict__: Dict[str, Callable] = {} @classmethod def scpi(cls, scpi_string: str) -> Callable: def decorator(function): __tmp_scpi_dict__[scpi_string] = cast_from_annotation( function ) return decorator def send(self, scpi_string: str) -> Any: found = False args = None function = None for regex_pattern in self.__scpi_dict__: search_result = re.search(regex_pattern, scpi_string) if search_result: if not found: found = True function = self.__scpi_dict__[regex_pattern] args = search_result.groups() else: raise MockingError( f"SCPI command {scpi_string} matches multiple mocker " f"class entries" ) if not found: raise ValueError(f"Unknown SCPI command {scpi_string}") new_function = function return str(new_function(self, *args)) scpi = BaseMocker.scpi ``` #### File: test/mock_instruments/instruments.py ```python from collections import defaultdict from visa_mock.base.base_mocker import BaseMocker, scpi class Mocker1(BaseMocker): """ A mocker class mocking a multi channel voltage source. Voltages are zero by default """ def __init__(self) -> None: self._voltage = defaultdict(lambda: 0.0) @scpi(r":INSTR:CHANNEL(.*):VOLT (.*)") def _set_voltage(self, channel: int, value: float) -> None: self._voltage[channel] = value @scpi(r":INSTR:CHANNEL(.*):VOLT\?") def _get_voltage(self, channel: int) -> float: return self._voltage[channel] class Mocker2(BaseMocker): """ A mocker class mocking a multi channel voltage source. Voltages are zero by default """ def __init__(self): self._voltage = defaultdict(lambda: 0.0) @scpi(r":INSTR:CHANNEL(.*):VOLT (.*)") def _set_voltage(self, channel: int, value: float) -> None: self._voltage[channel] = value @scpi(r":INSTR:CHANNEL(.*):VOLT\?") def _get_voltage(self, channel: int) -> float: return 2 * self._voltage[channel] resources = { "MOCK0::mock1::INSTR": Mocker1(), "MOCK0::mock2::INSTR": Mocker2(), } ```
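The `@scpi` decorator above registers each handler under a regex, and `BaseMocker.send` dispatches an incoming SCPI string to the single matching handler, casting the captured groups according to the handler's type annotations. A short sketch of that round trip with the `Mocker1` class defined above (the import path is an assumption; adjust it to wherever `instruments.py` lives in your checkout):

```python
from mock_instruments.instruments import Mocker1  # hypothetical import path

mocker = Mocker1()
# ":INSTR:CHANNEL(.*):VOLT (.*)" captures ("1", "2.5"); cast_from_annotation
# converts them to channel=1 (int) and value=2.5 (float) before the call.
mocker.send(":INSTR:CHANNEL1:VOLT 2.5")
print(mocker.send(":INSTR:CHANNEL1:VOLT?"))   # "2.5"
print(mocker.send(":INSTR:CHANNEL2:VOLT?"))   # "0.0" -- defaultdict default
```

Sending a string that matches no registered pattern raises `ValueError`, and a string matching more than one pattern raises `MockingError`.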
{ "source": "jenshnielsen/Qcodes_contrib_drivers", "score": 2 }
#### File: drivers/Oxford/ILM200.py ```python from time import sleep import visa import logging from qcodes import VisaInstrument class OxfordInstruments_ILM200(VisaInstrument): """ This is the qcodes driver for the Oxford Instruments ILM 200 Helium Level Meter. Usage: Initialize with <name> = instruments.create('name', 'OxfordInstruments_ILM200', address='<Instrument address>') <Instrument address> = ASRL4::INSTR Note: Since the ISOBUS allows for several instruments to be managed in parallel, the command which is sent to the device starts with '@n', where n is the ISOBUS instrument number. """ def __init__(self, name, address, number=1, **kwargs): """ Initializes the Oxford Instruments ILM 200 Helium Level Meter. Args: name (str): name of the instrument address (str): instrument address number (int): ISOBUS instrument number (number=1 is specific to the ILM in F008) Returns: None """ logging.debug(__name__ + ' : Initializing instrument') super().__init__(name, address, **kwargs) self.visa_handle.set_visa_attribute(visa.constants.VI_ATTR_ASRL_STOP_BITS, visa.constants.VI_ASRL_STOP_TWO) self._address = address self._number = number self._values = {} self.add_parameter('level', label='level', get_cmd=self._do_get_level, unit='%') self.add_parameter('status', get_cmd=self._do_get_status) self.add_parameter('rate', get_cmd=self._do_get_rate, set_cmd=self._do_set_rate) # a dummy command to avoid the initial error try: self.get_idn() sleep(70e-3) # wait for the device to be able to respond self._read() # to flush the buffer except Exception as ex: logging.debug(ex) def _execute(self, message): """ Write a command to the device and read answer. This function writes to the buffer by adding the device number at the front, instead of 'ask'. Args: message (str) : write command for the device Returns: None """ logging.info( __name__ + ' : Send the following command to the device: %s' % message) self.visa_handle.write('@%s%s' % (self._number, message)) sleep(70e-3) # wait for the device to be able to respond result = self._read() if result.find('?') >= 0: print("Error: Command %s not recognized" % message) else: return result def _read(self): """ Reads the total bytes in the buffer and outputs as a string. Args: None Returns: message (str) """ # because protocol has no termination chars the read reads the number # of bytes in the buffer bytes_in_buffer = self.visa_handle.bytes_in_buffer # a workaround for a timeout error in the pyvsia read_raw() function with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)): mes = self.visa_handle.visalib.read( self.visa_handle.session, bytes_in_buffer) # cannot be done on same line for some reason mes = str(mes[0].decode()) return mes def get_idn(self): """ Overrides the function of Instrument since ILM does not support `*IDN?` This string is supposed to be a comma-separated list of vendor, model, serial, and firmware, but semicolon and colon are also common separators so we accept them here as well. Returns: A dict containing vendor, model, serial, and firmware. 
""" try: idstr = '' # in case self.ask fails idstr = self._get_version().split() # form is supposed to be comma-separated, but we've seen # other separators occasionally idparts = [idstr[3] + ' ' + idstr[4], idstr[0], idstr[5], idstr[1] + ' ' + idstr[2]] # in case parts at the end are missing, fill in None if len(idparts) < 4: idparts += [None] * (4 - len(idparts)) except Exception as ex: logging.warn('Error getting or interpreting *IDN?: ' + repr(idstr)) logging.debug(ex) idparts = [None, None, None, None] return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts)) def get_all(self): """ Reads all implemented parameters from the instrument, and updates the wrapper. """ logging.info(__name__ + ' : reading all settings from instrument') self.level.get() self.status.get() self.rate.get() def close(self): """ Safely close connection """ logging.info(__name__ + ' : Closing ILM200 connection') self.local() super().close() # Functions: Monitor commands def _get_version(self): """ Identify the device Args: None Returns: identification (str): should be 'ILM200 Version 1.08 (c) OXFORD 1994\r' """ logging.info(__name__ + ' : Identify the device') return self._execute('V') def _do_get_level(self): """ Get Helium level of channel 1. Args: None Returns: result (float) : Helium level """ logging.info(__name__ + ' : Read level of channel 1') result = self._execute('R1') return float(result.replace("R", "")) / 10 def _do_get_status(self): """ Get status of the device. """ logging.info(__name__ + ' : Get status of the device.') result = self._execute('X') usage = { 0: "Channel not in use", 1: "Channel used for Nitrogen level", 2: "Channel used for Helium Level (Normal pulsed operation)", 3: "Channel used for Helium Level (Continuous measurement)", 9: "Error on channel (Usually means probe unplugged)" } # current_flowing = { # 0 : "Curent not flowing in Helium Probe Wire", # 1 : "Curent not flowing in Helium Probe Wire" # } # auto_fill_status = { # 00 : "End Fill (Level > FULL)", # 01 : "Not Filling (Level < FULL, Level > FILL)", # 10 : "Filling (Level < FULL, Level > FILL)", # 11 : "Start Filling (Level < FILL)" # } return usage.get(int(result[1]), "Unknown") def _do_get_rate(self): """ Get helium meter channel 1 probe rate Input: None Output: rate(int) : 0 : "SLOW" 1 : "FAST" """ rate = { 1: "1 : Helium Probe in FAST rate", 0: "0 : Helium Probe in SLOW rate" } result = self._execute('X') return rate.get(int(format(int(result[5:7]), '08b')[6]), "Unknown") def remote(self): """ Set control to remote & locked """ logging.info(__name__ + ' : Set control to remote & locked') self.set_remote_status(1) def local(self): """ Set control to local & locked """ logging.info(__name__ + ' : Set control to local & locked') self.set_remote_status(0) def set_remote_status(self, mode): """ Set remote control status. Args: mode(int) : 0 : "Local and locked", 1 : "Remote and locked", 2 : "Local and unlocked", 3 : "Remote and unlocked", Returns: None """ status = { 0: "Local and locked", 1: "Remote and locked", 2: "Local and unlocked", 3: "Remote and unlocked", } logging.info(__name__ + ' : Setting remote control status to %s' % status.get(mode, "Unknown")) self._execute('C%s' % mode) # Functions: Control commands (only recognised when in REMOTE control) def set_to_slow(self): """ Set helium meter channel 1 to slow mode. 
""" self.set_remote_status(1) logging.info(__name__ + ' : Setting Helium Probe in SLOW rate') self._execute('S1') self.set_remote_status(3) def set_to_fast(self): """ Set helium meter channel 1 to fast mode. """ self.set_remote_status(1) logging.info(__name__ + ' : Setting Helium Probe in FAST rate') self._execute('T1') self.set_remote_status(3) def _do_set_rate(self, rate): """ Set helium meter channel 1 probe rate Args: rate(int) : 0 : "SLOW" 1 : "FAST" """ self.set_remote_status(1) if rate == 0: self.set_to_slow() elif rate == 1: self.set_to_fast() self.set_remote_status(3) logging.info(self._do_get_rate()) ```
{ "source": "jenshnielsen/sphinx", "score": 2 }
#### File: sphinx/sphinx/config.py ```python import re from os import path, getenv from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integer_types from typing import Any, NamedTuple, Union from sphinx.errors import ConfigError from sphinx.locale import l_, __ from sphinx.util import logging from sphinx.util.i18n import format_date from sphinx.util.osutil import cd from sphinx.util.pycompat import execfile_, NoneType if False: # For type annotation from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Union # NOQA from sphinx.util.tags import Tags # NOQA logger = logging.getLogger(__name__) nonascii_re = re.compile(br'[\x80-\xff]') copyright_year_re = re.compile(r'^((\d{4}-)?)(\d{4})(?=[ ,])') CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s" if PY3: CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?" CONFIG_EXIT_ERROR = "The configuration file (or one of the modules it imports) " \ "called sys.exit()" CONFIG_ENUM_WARNING = "The config value `{name}` has to be a one of {candidates}, " \ "but `{current}` is given." CONFIG_PERMITTED_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \ "expected to {permitted}." CONFIG_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \ "defaults to `{default.__name__}'." if PY3: unicode = str # special alias for static typing... ConfigValue = NamedTuple('ConfigValue', [('name', str), ('value', Any), ('rebuild', Union[bool, unicode])]) class ENUM(object): """represents the config value should be a one of candidates. Example: app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline')) """ def __init__(self, *candidates): # type: (unicode) -> None self.candidates = candidates def match(self, value): # type: (Union[unicode,List,Tuple]) -> bool if isinstance(value, (list, tuple)): return all(item in self.candidates for item in value) else: return value in self.candidates string_classes = [text_type] # type: List if PY2: string_classes.append(binary_type) # => [str, unicode] class Config(object): """ Configuration file abstraction. """ # the values are: (default, what needs to be rebuilt if changed) # If you add a value here, don't forget to include it in the # quickstart.py file template as well as in the docs! 
config_values = dict( # general options project = ('Python', 'env'), copyright = ('', 'html'), version = ('', 'env'), release = ('', 'env'), today = ('', 'env'), # the real default is locale-dependent today_fmt = (None, 'env', string_classes), language = (None, 'env', string_classes), locale_dirs = (['locales'], 'env'), figure_language_filename = (u'{root}.{language}{ext}', 'env', [str]), master_doc = ('contents', 'env'), source_suffix = (['.rst'], 'env'), source_encoding = ('utf-8-sig', 'env'), source_parsers = ({}, 'env'), exclude_patterns = ([], 'env'), default_role = (None, 'env', string_classes), add_function_parentheses = (True, 'env'), add_module_names = (True, 'env'), trim_footnote_reference_space = (False, 'env'), show_authors = (False, 'env'), pygments_style = (None, 'html', string_classes), highlight_language = ('default', 'env'), highlight_options = ({}, 'env'), templates_path = ([], 'html'), template_bridge = (None, 'html', string_classes), keep_warnings = (False, 'env'), suppress_warnings = ([], 'env'), modindex_common_prefix = ([], 'html'), rst_epilog = (None, 'env', string_classes), rst_prolog = (None, 'env', string_classes), trim_doctest_flags = (True, 'env'), primary_domain = ('py', 'env', [NoneType]), needs_sphinx = (None, None, string_classes), needs_extensions = ({}, None), nitpicky = (False, None), nitpick_ignore = ([], None), numfig = (False, 'env'), numfig_secnum_depth = (1, 'env'), numfig_format = ({'section': l_('Section %s'), 'figure': l_('Fig. %s'), 'table': l_('Table %s'), 'code-block': l_('Listing %s')}, 'env'), tls_verify = (True, 'env'), tls_cacerts = (None, 'env'), ) # type: Dict[unicode, Tuple] def __init__(self, dirname, filename, overrides, tags): # type: (unicode, unicode, Dict, Tags) -> None self.overrides = overrides self.values = Config.config_values.copy() config = {} # type: Dict[unicode, Any] if dirname is not None: config_file = path.join(dirname, filename) config['__file__'] = config_file config['tags'] = tags with cd(dirname): # we promise to have the config dir as current dir while the # config file is executed try: execfile_(filename, config) except SyntaxError as err: raise ConfigError(CONFIG_SYNTAX_ERROR % err) except SystemExit: raise ConfigError(CONFIG_EXIT_ERROR) self._raw_config = config # these two must be preinitialized because extensions can add their # own config values self.setup = config.get('setup', None) # type: Callable if 'extensions' in overrides: if isinstance(overrides['extensions'], string_types): config['extensions'] = overrides.pop('extensions').split(',') else: config['extensions'] = overrides.pop('extensions') self.extensions = config.get('extensions', []) # type: List[unicode] # correct values of copyright year that are not coherent with # the SOURCE_DATE_EPOCH environment variable (if set) # See https://reproducible-builds.org/specs/source-date-epoch/ if getenv('SOURCE_DATE_EPOCH') is not None: for k in ('copyright', 'epub_copyright'): if k in config: config[k] = copyright_year_re.sub(r'\g<1>%s' % format_date('%Y'), config[k]) def check_types(self): # type: () -> None # check all values for deviation from the default value's type, since # that can result in TypeErrors all over the place # NB. 
since config values might use l_() we have to wait with calling # this method until i18n is initialized for name in self._raw_config: if name not in self.values: continue # we don't know a default value settings = self.values[name] default, dummy_rebuild = settings[:2] permitted = settings[2] if len(settings) == 3 else () if hasattr(default, '__call__'): default = default(self) # could invoke l_() if default is None and not permitted: continue # neither inferrable nor expliclitly permitted types current = self[name] if isinstance(permitted, ENUM): if not permitted.match(current): logger.warning(CONFIG_ENUM_WARNING.format( name=name, current=current, candidates=permitted.candidates)) else: if type(current) is type(default): continue if type(current) in permitted: continue common_bases = (set(type(current).__bases__ + (type(current),)) & set(type(default).__bases__)) common_bases.discard(object) if common_bases: continue # at least we share a non-trivial base class if permitted: logger.warning(CONFIG_PERMITTED_TYPE_WARNING.format( name=name, current=type(current), permitted=str([cls.__name__ for cls in permitted]))) else: logger.warning(CONFIG_TYPE_WARNING.format( name=name, current=type(current), default=type(default))) def check_unicode(self): # type: () -> None # check all string values for non-ASCII characters in bytestrings, # since that can result in UnicodeErrors all over the place for name, value in iteritems(self._raw_config): if isinstance(value, binary_type) and nonascii_re.search(value): logger.warning('the config value %r is set to a string with non-ASCII ' 'characters; this can lead to Unicode errors occurring. ' 'Please use Unicode strings, e.g. %r.', name, u'Content') def convert_overrides(self, name, value): # type: (unicode, Any) -> Any if not isinstance(value, string_types): return value else: defvalue = self.values[name][0] if isinstance(defvalue, dict): raise ValueError(__('cannot override dictionary config setting %r, ' 'ignoring (use %r to set individual elements)') % (name, name + '.key=value')) elif isinstance(defvalue, list): return value.split(',') elif isinstance(defvalue, integer_types): try: return int(value) except ValueError: raise ValueError(__('invalid number %r for config value %r, ignoring') % (value, name)) elif hasattr(defvalue, '__call__'): return value elif defvalue is not None and not isinstance(defvalue, string_types): raise ValueError(__('cannot override config setting %r with unsupported ' 'type, ignoring') % name) else: return value def pre_init_values(self): # type: () -> None """ Initialize some limited config variables before initialize i18n and loading extensions """ variables = ['needs_sphinx', 'suppress_warnings', 'language', 'locale_dirs'] for name in variables: try: if name in self.overrides: self.__dict__[name] = self.convert_overrides(name, self.overrides[name]) elif name in self._raw_config: self.__dict__[name] = self._raw_config[name] except ValueError as exc: logger.warning("%s", exc) def init_values(self): # type: () -> None config = self._raw_config for valname, value in iteritems(self.overrides): try: if '.' 
in valname: realvalname, key = valname.split('.', 1) config.setdefault(realvalname, {})[key] = value continue elif valname not in self.values: logger.warning(__('unknown config value %r in override, ignoring'), valname) continue if isinstance(value, string_types): config[valname] = self.convert_overrides(valname, value) else: config[valname] = value except ValueError as exc: logger.warning("%s", exc) for name in config: if name in self.values: self.__dict__[name] = config[name] if isinstance(self.source_suffix, string_types): # type: ignore self.source_suffix = [self.source_suffix] # type: ignore def __getattr__(self, name): # type: (unicode) -> Any if name.startswith('_'): raise AttributeError(name) if name not in self.values: raise AttributeError(__('No such config value: %s') % name) default = self.values[name][0] if hasattr(default, '__call__'): return default(self) return default def __getitem__(self, name): # type: (unicode) -> unicode return getattr(self, name) def __setitem__(self, name, value): # type: (unicode, Any) -> None setattr(self, name, value) def __delitem__(self, name): # type: (unicode) -> None delattr(self, name) def __contains__(self, name): # type: (unicode) -> bool return name in self.values def __iter__(self): # type: () -> Iterable[ConfigValue] for name, value in iteritems(self.values): yield ConfigValue(name, getattr(self, name), value[1]) # type: ignore def add(self, name, default, rebuild, types): # type: (unicode, Any, Union[bool, unicode], Any) -> None self.values[name] = (default, rebuild, types) def filter(self, rebuild): # type: (str) -> Iterator[ConfigValue] return (value for value in self if value.rebuild == rebuild) # type: ignore ```
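The `ENUM` helper defined above is how `check_types` restricts a config value to a fixed set of choices; `match` also accepts a list or tuple and requires every element to be a candidate. A quick sketch, mirroring the `latex_show_urls` example from its docstring:

```python
from sphinx.config import ENUM

latex_show_urls = ENUM('no', 'footnote', 'inline')
print(latex_show_urls.match('footnote'))        # True
print(latex_show_urls.match('maybe'))           # False
print(latex_show_urls.match(['no', 'inline']))  # True -- every element must be a candidate
```

Note that in `check_types` a value failing `match` only produces a `CONFIG_ENUM_WARNING` via the logger; it does not raise.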
{ "source": "jenshnielsen/versioningit", "score": 2 }
#### File: src/versioningit/config.py ```python from dataclasses import Field, dataclass, field, fields from pathlib import Path from typing import Any, Dict, Optional, Union import tomli from .errors import ConfigError, NotVersioningitError from .logging import log, warn_extra_fields from .methods import ( CallableSpec, CustomMethodSpec, EntryPointSpec, MethodSpec, VersioningitMethod, ) from .util import optional_str_guard @dataclass class ConfigSection: """A parsed method subtable of the `versioningit` configuration""" #: Specification of the method for this subtable method_spec: MethodSpec #: Additional parameters to pass to the method params: Dict[str, Any] def load(self, project_dir: Union[str, Path]) -> VersioningitMethod: """Loads the method and returns a `VersioningitMethod`""" return VersioningitMethod(self.method_spec.load(project_dir), self.params) @dataclass class Config: """Parsed `versioningit` configuration""" #: Parsed ``vcs`` subtable vcs: ConfigSection = field( metadata={"default_entry_point": "git", "forbidden_params": ["project_dir"]} ) #: Parsed ``tag2version`` subtable tag2version: ConfigSection = field( metadata={"default_entry_point": "basic", "forbidden_params": ["tag"]} ) #: Parsed ``next-version`` subtable next_version: ConfigSection = field( metadata={ "key": "next-version", "default_entry_point": "minor", "forbidden_params": ["version", "branch"], } ) #: Parsed ``format`` subtable format: ConfigSection = field( metadata={ "default_entry_point": "basic", "forbidden_params": ["description", "version", "next_version"], } ) #: Parsed ``write`` subtable write: ConfigSection = field( metadata={ "default_entry_point": "basic", "forbidden_params": ["project_dir", "version"], } ) #: The ``default-version`` setting default_version: Optional[str] = None @classmethod def parse_toml_file(cls, filepath: Union[str, Path]) -> "Config": """ Parse the ``[tool.versioningit]`` table in the given TOML file :raises NotVersioningitError: if the file does not contain a ``[tool.versioningit]`` table :raises ConfigError: if the ``tool.versioningit`` key or any of its subfields are not of the correct type """ with open(filepath, "rb") as fp: data = tomli.load(fp).get("tool", {}).get("versioningit") if data is None: raise NotVersioningitError("versioningit not enabled in pyproject.toml") return cls.parse_obj(data) @classmethod def parse_obj(cls, obj: Any) -> "Config": """ Parse a raw Python configuration structure :raises ConfigError: - if ``obj`` is not a `dict` - if ``default-version`` or any of the subtable or ``method`` fields are not of the correct type """ if not isinstance(obj, dict): raise ConfigError("tool.versioningit must be a table") default_version = optional_str_guard( obj.pop("default-version", None), "tool.versioningit.default-version" ) sections: Dict[str, ConfigSection] = {} for f in fields(cls): if f.type is not ConfigSection: continue key = f.metadata.get("key", f.name) sections[f.name] = cls.parse_section(f, obj.pop(key, None)) warn_extra_fields( obj, "tool.versioningit", [f.metadata.get("key", f.name) for f in fields(cls)], ) return cls(default_version=default_version, **sections) @staticmethod def parse_section(f: Field, obj: Any) -> "ConfigSection": """ Parse a ``tool.versioniningit.STEP`` field according to the metadata in the given `dataclasses.Field`, which must consist of the following items: ``key`` : string The key used for the step in the ``[tool.versioningit]`` table. If not specified, defaults to the field's ``name`` attribute. 
``default_entry_point`` : string The name of the default method to use for the step if one is not specified in the configuration ``forbidden_params`` : list of strings Names of non-user-supplied-parameter arguments passed to the step's method that must be discarded if found among the parameters :raises ConfigError: - if ``obj`` is not `None`, a callable, a string, or a `dict` - if any of the ``method`` fields are not of the correct type """ key = f.metadata.get("key", f.name) if obj is None or isinstance(obj, str): method_spec = Config.parse_method_spec(f, obj) return ConfigSection(method_spec, {}) elif callable(obj): method_spec = CallableSpec(obj) return ConfigSection(method_spec, {}) elif isinstance(obj, dict): if "method" not in obj and "module" in obj and "value" in obj: method_spec = Config.parse_method_spec(f, obj) return ConfigSection(method_spec, {}) method_spec = Config.parse_method_spec(f, obj.pop("method", None)) for p in f.metadata["forbidden_params"]: if p in obj: log.warning( "tool.versioningit.%s cannot contain %r field; discarding", key, p, ) obj.pop(p) return ConfigSection(method_spec, obj) else: raise ConfigError(f"tool.versioningit.{key} must be a string or table") @staticmethod def parse_method_spec(f: Field, method: Any) -> MethodSpec: """ Parse a ``method`` field according to the metadata in the given `dataclasses.Field` (see `parse_section()`) :raises ConfigError: - if ``method`` is not `None`, a string, a callable, or a `dict` - if ``method`` is a `dict` without a ``module`` or ``value`` key - if any of the fields in ``method`` are not of the correct type """ key = f.metadata.get("key", f.name) if method is None: return EntryPointSpec( group=f"versioningit.{f.name}", name=f.metadata["default_entry_point"] ) elif isinstance(method, str): return EntryPointSpec(group=f"versioningit.{f.name}", name=method) elif callable(method): return CallableSpec(method) elif isinstance(method, dict): module = method.pop("module", None) if not isinstance(module, str): raise ConfigError( f"tool.versioningit.{key}.method.module is required and" " must be a string" ) value = method.pop("value", None) if not isinstance(value, str): raise ConfigError( f"tool.versioningit.{key}.method.value is required and" " must be a string" ) module_dir = method.pop("module-dir", None) if module_dir is not None and not isinstance(module_dir, str): raise ConfigError( f"tool.versioningit.{key}.method.module-dir must be a string" ) warn_extra_fields( method, f"tool.versioningit.{key}.method", ["module", "value", "module-dir"], ) return CustomMethodSpec(module, value, module_dir) else: raise ConfigError( f"tool.versioningit.{key}.method must be a string or table" ) ``` #### File: src/versioningit/git.py ```python from dataclasses import dataclass from datetime import datetime from pathlib import Path import re import subprocess from typing import Any, List, NamedTuple, Optional, Union from .core import VCSDescription from .errors import NoTagError, NotVCSError from .logging import log, warn_extra_fields from .util import ( fromtimestamp, get_build_date, is_sdist, list_str_guard, optional_str_guard, readcmd, runcmd, ) DEFAULT_DATE = fromtimestamp(0) class Describe(NamedTuple): """ Parsed representation of `git describe` output when it includes all three fields and does not end with "``-dirty``" """ #: The most recent tag tag: str #: The number of commits since the tag distance: int #: The abbreviated hash of the HEAD commit rev: str @classmethod def parse(cls, s: str) -> "Describe": m = 
re.fullmatch(r"(?P<tag>.+)-(?P<distance>[0-9]+)-g(?P<rev>[0-9a-f]+)?", s) if not m: raise ValueError("Could not parse `git describe` output") tag = m["tag"] assert isinstance(tag, str) distance = int(m["distance"]) rev = m["rev"] assert isinstance(rev, str) return cls(tag, distance, rev) @dataclass class GitRepo: """Methods for querying a Git repository""" #: The repository's working tree or a subdirectory thereof path: Union[str, Path] def ensure_is_repo(self) -> None: """ Test whether `path` is under Git revision control; if it is not (or if Git is not installed), raise a `NotVCSError` """ try: if ( self.read( "rev-parse", "--is-inside-work-tree", stderr=subprocess.DEVNULL ) == "false" ): # We are inside a .git directory raise NotVCSError(f"{self.path} is not in a Git working tree") except FileNotFoundError: raise NotVCSError("Git not installed; assuming this isn't a Git repository") except subprocess.CalledProcessError: raise NotVCSError(f"{self.path} is not in a Git repository") try: # Check whether `path` is tracked by Git (Note that we can't rely # on this check alone, as it succeeds when inside a .git/ # directory) runcmd( "git", "-C", self.path, "ls-files", "--error-unmatch", ".", stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) except subprocess.CalledProcessError: raise NotVCSError(f"{self.path} is not tracked by Git") def read(self, *args: str, **kwargs: Any) -> str: """ Run a Git command with the given arguments in `path` and return the stripped stdout """ return readcmd("git", "-C", self.path, *args, **kwargs) def describe(self, match: List[str], exclude: List[str]) -> str: """ Run ``git describe --tags --long --dirty --always`` with the given arguments to ``--match`` and ``--exclude`` in the repository; if the command fails, raises `NoTagError` """ cmd = ["describe", "--tags", "--long", "--dirty", "--always"] for pat in match: cmd.append(f"--match={pat}") for pat in exclude: cmd.append(f"--exclude={pat}") try: return self.read(*cmd, stderr=subprocess.PIPE) except subprocess.CalledProcessError as e: # As far as I'm aware, this only happens in a repo without any # commits or a corrupted repo. 
raise NoTagError(f"`git describe` command failed: {e.stderr.strip()}") def get_branch(self) -> Optional[str]: """ Return the name of the current branch, or `None` if the repository is in a detached HEAD state """ try: return self.read( "symbolic-ref", "--short", "-q", "HEAD", stderr=subprocess.DEVNULL ) except subprocess.CalledProcessError: return None def describe_git(*, project_dir: Union[str, Path], **kwargs: Any) -> VCSDescription: """Implements the ``"git"`` ``vcs`` method""" match = list_str_guard(kwargs.pop("match", []), "tool.versioningit.vcs.match") exclude = list_str_guard(kwargs.pop("exclude", []), "tool.versioningit.vcs.exclude") default_tag = optional_str_guard( kwargs.pop("default-tag", None), "tool.versioningit.vcs.default-tag" ) warn_extra_fields( kwargs, "tool.versioningit.vcs", ["match", "exclude", "default-tag"] ) build_date = get_build_date() repo = GitRepo(project_dir) repo.ensure_is_repo() vdesc = describe_git_core(repo, build_date, match, exclude, default_tag) if "revision" not in vdesc.fields: revision, author_ts, committer_ts = repo.read( "--no-pager", "show", "-s", "--format=%H%n%at%n%ct" ).splitlines() vdesc.fields["revision"] = revision vdesc.fields["author_date"] = min(build_date, fromtimestamp(int(author_ts))) vdesc.fields["committer_date"] = min( build_date, fromtimestamp(int(committer_ts)) ) return vdesc def describe_git_archive( *, project_dir: Union[str, Path], **kwargs: Any ) -> VCSDescription: """Implements the ``"git-archive"`` ``vcs`` method""" match = list_str_guard(kwargs.pop("match", []), "tool.versioningit.vcs.match") exclude = list_str_guard(kwargs.pop("exclude", []), "tool.versioningit.vcs.exclude") default_tag = optional_str_guard( kwargs.pop("default-tag", None), "tool.versioningit.vcs.default-tag" ) describe_subst = optional_str_guard( kwargs.pop("describe-subst", None), "tool.versioningit.vcs.describe-subst" ) warn_extra_fields( kwargs, "tool.versioningit.vcs", ["match", "exclude", "default-tag", "describe-subst"], ) build_date = get_build_date() repo = GitRepo(project_dir) try: repo.ensure_is_repo() except NotVCSError: if is_sdist(project_dir): pass elif describe_subst is None: log.warning( "This appears to be a Git archive, yet" " tool.versioningit.vcs.describe-subst is not set" ) elif describe_subst == "": raise NoTagError( "tool.versioningit.vcs.describe-subst is empty in Git archive" ) elif describe_subst.startswith("$Format"): raise NoTagError( "tool.versioningit.vcs.describe-subst not expanded in Git archive" ) else: log.info( "Parsing version information from describe-subst = %r", describe_subst ) try: tag, distance, rev = Describe.parse(describe_subst) except ValueError: tag = describe_subst distance = 0 rev = "0" * 7 return VCSDescription( tag=tag, state="distance" if distance else "exact", branch=None, fields={ "distance": distance, "rev": rev, "build_date": build_date, "vcs": "g", "vcs_name": "git", }, ) raise if describe_subst is None: log.warning( "Using git-archive yet tool.versioningit.vcs.describe-subst is not set" ) elif not re.fullmatch(r"\$Format:%\(describe(?::.*)?\)\$", describe_subst): log.warning( "tool.versioningit.vcs.describe-subst does not appear to be set to" " a valid $Format:%%(describe)$ placeholder" ) vdesc = describe_git_core(repo, build_date, match, exclude, default_tag) vdesc.fields.pop("revision", None) vdesc.fields.pop("author_date", None) vdesc.fields.pop("committer_date", None) return vdesc def describe_git_core( repo: GitRepo, build_date: datetime, match: List[str], exclude: List[str], default_tag: 
Optional[str], ) -> VCSDescription: """Common functionality of the ``"git"`` and ``"git-archive"`` methods""" try: description = repo.describe(match, exclude) except NoTagError as e: # There are no commits in the repo if default_tag is not None: log.error("%s", e) log.info("Falling back to default tag %r", default_tag) return VCSDescription( tag=default_tag, state="dirty", branch=repo.get_branch(), fields={ "distance": 0, "rev": "0" * 7, "revision": "0" * 40, "author_date": min(build_date, DEFAULT_DATE), "committer_date": min(build_date, DEFAULT_DATE), "build_date": build_date, "vcs": "g", "vcs_name": "git", }, ) else: raise if description.endswith("-dirty"): dirty = True description = description[: -len("-dirty")] else: dirty = False try: tag, distance, rev = Describe.parse(description) except ValueError: if default_tag is not None: log.info( "`git describe` returned a hash instead of a tag; falling back to" " default tag %r", default_tag, ) tag = default_tag distance = int(repo.read("rev-list", "--count", "HEAD")) - 1 rev = description else: raise NoTagError("`git describe` could not find a tag") if distance and dirty: state = "distance-dirty" elif distance: state = "distance" elif dirty: state = "dirty" else: state = "exact" return VCSDescription( tag=tag, state=state, branch=repo.get_branch(), fields={ "distance": distance, "rev": rev, "build_date": build_date, "vcs": "g", "vcs_name": "git", }, ) ``` #### File: versioningit/test/test_format.py ```python from datetime import datetime, timezone from typing import Any, Dict import pytest from versioningit.basics import basic_format from versioningit.core import VCSDescription from versioningit.errors import ConfigError BUILD_DATE = datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc) @pytest.mark.parametrize( "description,version,next_version,params,r", [ ( VCSDescription( tag="v0.1.0", state="distance", branch="main", fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {}, "0.1.0.post5+gabcdef0", ), ( VCSDescription( tag="v0.1.0", state="dirty", branch="main", fields={ "distance": 0, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {}, "0.1.0+d20380119", ), ( VCSDescription( tag="v0.1.0", state="distance-dirty", branch="main", fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {}, "0.1.0.post5+gabcdef0.d20380119", ), ( VCSDescription( tag="v0.1.0", state="distance", branch="main", fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {"distance": "{next_version}.dev{distance}+{vcs}{rev}"}, "0.2.0.dev5+gabcdef0", ), ( VCSDescription( tag="v0.1.0", state="distance", branch="feature/acme", fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {"distance": "{next_version}+{branch}.{rev}"}, "0.2.0+feature.acme.abcdef0", ), ( VCSDescription( tag="v0.1.0", state="distance", branch=None, fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {"distance": "{next_version}+{branch}.{rev}"}, "0.2.0+None.abcdef0", ), ( VCSDescription( tag="v0.1.0", state="weird", branch="main", fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), "0.1.0", "0.2.0", {"weird": "{version}+{branch}.{build_date:%Y.%m.%d}"}, "0.1.0+main.2038.01.19", ), ], ) def test_basic_format( caplog: pytest.LogCaptureFixture, description: VCSDescription, version: str, 
next_version: str, params: Dict[str, Any], r: str, ) -> None: assert ( basic_format( description=description, version=version, next_version=next_version, **params ) == r ) assert caplog.record_tuples == [] def test_basic_format_invalid_state(caplog: pytest.LogCaptureFixture) -> None: with pytest.raises(ConfigError) as excinfo: basic_format( description=VCSDescription( tag="v0.1.0", state="weird", branch="main", fields={ "distance": 5, "vcs": "g", "rev": "abcdef0", "build_date": BUILD_DATE, }, ), version="0.1.0", next_version="0.2.0", ) assert str(excinfo.value) == ( "No format string for 'weird' state found in tool.versioningit.format" ) assert caplog.record_tuples == [] ``` #### File: versioningit/test/test_hg.py ```python from datetime import datetime, timezone import json import logging from operator import attrgetter from pathlib import Path import shutil import subprocess from typing import Any, Dict import pytest from versioningit.core import VCSDescription from versioningit.errors import NoTagError, NotVCSError from versioningit.hg import HGRepo, describe_hg, parse_hg_archival pytestmark = pytest.mark.skipif( shutil.which("hg") is None, reason="Mercurial not installed" ) BUILD_DATE = datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc) DATA_DIR = Path(__file__).with_name("data") @pytest.mark.parametrize( "repo,params,description", [ ( "exact", {}, VCSDescription( tag="v0.1.0", state="exact", branch="default", fields={ "distance": 0, "rev": "9eaa2ff36144", "revision": "9eaa2ff36144315dcaea2a1eff0a1a340f0bc30a", "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ), ), ( "distance", {}, VCSDescription( tag="v0.1.0", state="distance", branch="default", fields={ "distance": 1, "rev": "42ab202e999a", "revision": "42ab202e999a827488efaf13ba0e9acc2ff04c86", "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ), ), ( "distance-dirty", {}, VCSDescription( tag="v0.1.0", state="distance-dirty", branch="default", fields={ "distance": 1, "rev": "42ab202e999a", "revision": "42ab202e999a827488efaf13ba0e9acc2ff04c86", "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ), ), ( "default-tag", {"default-tag": "v0.0.0"}, VCSDescription( tag="v0.0.0", state="distance", branch="default", fields={ "distance": 1, "rev": "0234348fd175", "revision": "0234348fd175c227aefb8e51a55b79acc33d6f61", "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ), ), ( "pattern", {"pattern": r"re:^v"}, VCSDescription( tag="v0.1.0", state="distance", branch="default", fields={ "distance": 4, "rev": "7850f9f76320", "revision": "7850f9f763204ca4c886db3e5fc46be8f4472583", "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ), ), ( "pattern", {}, VCSDescription( tag="0.2.0", state="distance", branch="default", fields={ "distance": 1, "rev": "7850f9f76320", "revision": "7850f9f763204ca4c886db3e5fc46be8f4472583", "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ), ), ], ) def test_describe_hg( repo: str, params: Dict[str, Any], description: VCSDescription, tmp_path: Path ) -> None: shutil.unpack_archive(str(DATA_DIR / "repos" / "hg" / f"{repo}.zip"), str(tmp_path)) desc = describe_hg(project_dir=tmp_path, **params) assert desc == description assert desc.fields["build_date"].tzinfo is timezone.utc @pytest.mark.parametrize("repo", ["default-tag", "default-tag-archive"]) def test_describe_hg_no_tag(repo: str, tmp_path: Path) -> None: shutil.unpack_archive(str(DATA_DIR / "repos" / "hg" / f"{repo}.zip"), str(tmp_path)) with pytest.raises(NoTagError) as excinfo: 
describe_hg(project_dir=tmp_path) assert str(excinfo.value) == "No latest tag in Mercurial repository" def test_describe_hg_no_repo(tmp_path: Path) -> None: with pytest.raises(NotVCSError) as excinfo: describe_hg(project_dir=tmp_path) assert str(excinfo.value) == f"{tmp_path} is not tracked by Mercurial" @pytest.mark.parametrize("params", [{}, {"default-tag": "0.0.0"}]) def test_describe_hg_no_commits(tmp_path: Path, params: Dict[str, Any]) -> None: subprocess.run(["hg", "--cwd", str(tmp_path), "init"], check=True) with pytest.raises(NotVCSError) as excinfo: describe_hg(project_dir=tmp_path, **params) assert str(excinfo.value) == f"{tmp_path} is not tracked by Mercurial" def test_describe_hg_added_no_commits(tmp_path: Path) -> None: shutil.unpack_archive( str(DATA_DIR / "repos" / "hg" / "added-no-commits-default-tag.zip"), str(tmp_path), ) with pytest.raises(NoTagError) as excinfo: describe_hg(project_dir=tmp_path) assert str(excinfo.value) == "No latest tag in Mercurial repository" def test_describe_hg_added_no_commits_default_tag( caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: shutil.unpack_archive( str(DATA_DIR / "repos" / "hg" / "added-no-commits-default-tag.zip"), str(tmp_path), ) assert describe_hg( project_dir=tmp_path, **{"default-tag": "0.0.0"} ) == VCSDescription( tag="0.0.0", state="dirty", branch="default", fields={ "distance": 0, "rev": "0" * 12, "revision": "0" * 40, "build_date": BUILD_DATE, "vcs": "h", "vcs_name": "hg", }, ) assert ( "versioningit", logging.INFO, "No latest tag; falling back to default tag '0.0.0'", ) in caplog.record_tuples def test_ensure_is_repo_not_tracked(tmp_path: Path) -> None: shutil.unpack_archive(str(DATA_DIR / "repos" / "hg" / "exact.zip"), str(tmp_path)) (tmp_path / "subdir").mkdir() (tmp_path / "subdir" / "file.txt").touch() with pytest.raises(NotVCSError) as excinfo: HGRepo(tmp_path / "subdir").ensure_is_repo() assert str(excinfo.value) == f"{tmp_path / 'subdir'} is not tracked by Mercurial" def test_ensure_is_repo_dot_hg_dir(tmp_path: Path) -> None: subprocess.run(["hg", "--cwd", str(tmp_path), "init"], check=True) with pytest.raises(NotVCSError) as excinfo: HGRepo(tmp_path / ".hg").ensure_is_repo() assert str(excinfo.value) == f"{tmp_path / '.hg'} is not tracked by Mercurial" @pytest.mark.parametrize( "archival_file", sorted((DATA_DIR / "hg-archival").glob("*.txt")), ids=attrgetter("stem"), ) def test_parse_hg_archival(archival_file: Path) -> None: assert parse_hg_archival(archival_file) == json.loads( archival_file.with_suffix(".json").read_text(encoding="utf-8") ) ``` #### File: versioningit/test/test_main.py ```python import logging import os from pathlib import Path import subprocess import sys from _pytest.capture import CaptureFixture import pytest from pytest_mock import MockerFixture from versioningit.__main__ import main from versioningit.errors import Error def test_command( capsys: CaptureFixture[str], mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch ) -> None: monkeypatch.setattr(sys, "argv", ["versioningit"]) m = mocker.patch("versioningit.__main__.get_version", return_value="THE VERSION") spy = mocker.spy(logging, "basicConfig") main() m.assert_called_once_with(os.curdir, write=False, fallback=True) spy.assert_called_once_with( format="[%(levelname)-8s] %(name)s: %(message)s", level=logging.WARNING, ) out, err = capsys.readouterr() assert out == "THE VERSION\n" assert err == "" def test_command_arg( capsys: CaptureFixture[str], mocker: MockerFixture, tmp_path: Path ) -> None: m = 
mocker.patch("versioningit.__main__.get_version", return_value="THE VERSION") main([str(tmp_path)]) m.assert_called_once_with(str(tmp_path), write=False, fallback=True) out, err = capsys.readouterr() assert out == "THE VERSION\n" assert err == "" def test_command_write(capsys: CaptureFixture[str], mocker: MockerFixture) -> None: m = mocker.patch("versioningit.__main__.get_version", return_value="THE VERSION") main(["--write"]) m.assert_called_once_with(os.curdir, write=True, fallback=True) out, err = capsys.readouterr() assert out == "THE VERSION\n" assert err == "" def test_command_next_version( capsys: CaptureFixture[str], mocker: MockerFixture ) -> None: m = mocker.patch( "versioningit.__main__.get_next_version", return_value="THE NEXT VERSION" ) main(["--next-version"]) m.assert_called_once_with(os.curdir) out, err = capsys.readouterr() assert out == "THE NEXT VERSION\n" assert err == "" def test_command_next_version_arg( capsys: CaptureFixture[str], mocker: MockerFixture, tmp_path: Path ) -> None: m = mocker.patch( "versioningit.__main__.get_next_version", return_value="THE NEXT VERSION" ) main(["-n", str(tmp_path)]) m.assert_called_once_with(str(tmp_path)) out, err = capsys.readouterr() assert out == "THE NEXT VERSION\n" assert err == "" @pytest.mark.parametrize( "arg,log_level", [ ("-v", logging.INFO), ("-vv", logging.DEBUG), ("-vvv", logging.DEBUG), ], ) def test_command_verbose( capsys: CaptureFixture[str], mocker: MockerFixture, arg: str, log_level: int ) -> None: m = mocker.patch("versioningit.__main__.get_version", return_value="THE VERSION") spy = mocker.spy(logging, "basicConfig") main([arg]) m.assert_called_once_with(os.curdir, write=False, fallback=True) spy.assert_called_once_with( format="[%(levelname)-8s] %(name)s: %(message)s", level=log_level, ) out, err = capsys.readouterr() assert out == "THE VERSION\n" assert err == "" def test_command_error( capsys: CaptureFixture[str], mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch ) -> None: monkeypatch.setattr(sys, "argv", ["versioningit"]) m = mocker.patch( "versioningit.__main__.get_version", side_effect=Error("Something broke") ) with pytest.raises(SystemExit) as excinfo: main() assert excinfo.value.args == (1,) m.assert_called_once_with(os.curdir, write=False, fallback=True) out, err = capsys.readouterr() assert out == "" assert err == "versioningit: Error: Something broke\n" def test_command_subprocess_error( caplog: pytest.LogCaptureFixture, capsys: CaptureFixture[str], mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch, ) -> None: monkeypatch.setattr(sys, "argv", ["versioningit"]) m = mocker.patch( "versioningit.__main__.get_version", side_effect=subprocess.CalledProcessError( returncode=42, cmd=["git", "-C", ".", "get details"], output=b"", stderr=b"" ), ) with pytest.raises(SystemExit) as excinfo: main() assert excinfo.value.args == (42,) m.assert_called_once_with(os.curdir, write=False, fallback=True) out, err = capsys.readouterr() assert out == "" assert err == "" assert caplog.record_tuples == [ ("versioningit", logging.ERROR, "git -C . 
'get details': command returned 42") ] ``` #### File: versioningit/test/test_tag2version.py ```python from typing import Any, Dict import pytest from versioningit.basics import basic_tag2version from versioningit.errors import InvalidTagError @pytest.mark.parametrize( "tag,params,version", [ ("v01.02.03", {}, "01.02.03"), ("01.02.03", {}, "01.02.03"), ("vbad", {}, "bad"), ("v", {}, ""), ("rel-1.2.3", {}, "rel-1.2.3"), ("rel-1.2.3", {"rmprefix": "rel-"}, "1.2.3"), ("1.2.3-final", {}, "1.2.3-final"), ("1.2.3-final", {"rmsuffix": "-final"}, "1.2.3"), ("rel-1.2.3-final", {"regex": r"\d+(\.\d+)+"}, "1.2.3"), ( "rel-1.2.3-final", {"regex": r"^rel-(?P<version>\d+(\.\d+)+)-final$"}, "1.2.3", ), ( "rel-1.2.3-final", {"rmprefix": "rel-", "regex": r"^rel-(?P<version>\d+(\.\d+)+)-final$"}, "1.2.3-final", ), ], ) def test_basic_tag2version(tag: str, params: Dict[str, Any], version: str) -> None: assert basic_tag2version(tag=tag, **params) == version def test_basic_tag2version_no_version_captured() -> None: with pytest.raises(InvalidTagError) as excinfo: basic_tag2version(tag="rel-final", regex=r"^rel-(?P<version>\d+(\.d+)+)?") assert str(excinfo.value) == ( "'version' group in tool.versioningit.tag2version.regex did" " not participate in match" ) def test_basic_tag2version_require_match() -> None: with pytest.raises(InvalidTagError) as excinfo: basic_tag2version( tag="rel-1.2.3-final", **{ "rmprefix": "rel-", "regex": r"^rel-(?P<version>\d+(\.\d+)+)-final$", "require-match": "yes", } ) assert str(excinfo.value) == "tag2version.regex did not match tag '1.2.3-final'" ```
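Taken together, the `versioningit` pieces above can be exercised end to end: `Describe.parse` splits `git describe` output, `basic_tag2version` strips the tag prefix, and `basic_format` renders the final version string. The snippet below is a minimal, hypothetical sketch (it is not part of the repository shown above); it assumes `Describe` is importable from `versioningit.git` as the file path suggests, and the branch name and build date are arbitrary illustration values. Only classes and functions whose signatures appear in the code and tests above are used.

```python
from datetime import datetime, timezone

from versioningit.basics import basic_format, basic_tag2version
from versioningit.core import VCSDescription
from versioningit.git import Describe  # assumed import path, per src/versioningit/git.py

# "v0.1.0-5-gabcdef0" -> tag="v0.1.0", distance=5, rev="abcdef0"
desc = Describe.parse("v0.1.0-5-gabcdef0")

# The default tag2version step strips a leading "v": "v0.1.0" -> "0.1.0"
version = basic_tag2version(tag=desc.tag)

vcs_description = VCSDescription(
    tag=desc.tag,
    state="distance" if desc.distance else "exact",
    branch="main",  # arbitrary example value
    fields={
        "distance": desc.distance,
        "vcs": "g",
        "rev": desc.rev,
        "build_date": datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc),
    },
)

# With the default format strings this yields "0.1.0.post5+gabcdef0",
# matching the parametrized "distance" case in test_format.py above.
print(basic_format(description=vcs_description, version=version, next_version="0.2.0"))
```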
{ "source": "jenshnielsen/zhinst-qcodes", "score": 2 }
#### File: control/drivers/pqsc.py ```python from .base import ZIBaseInstrument from qcodes.instrument.channel import ChannelList, InstrumentChannel import qcodes.utils.validators as vals import zhinst.toolkit as tk from typing import List, Dict, Union import numpy as np class PQSC(ZIBaseInstrument): """QCoDeS driver for the *Zurich Instruments PQSC*""" def __init__( self, name: str, serial: str, interface: str = "1gbe", host: str = "localhost", port: int = 8004, api: int = 6, **kwargs, ) -> None: super().__init__(name, "pqsc", serial, interface, host, port, api, **kwargs) submodules = self.nodetree_dict.keys() blacklist = [] [self._init_submodule(key) for key in submodules if key not in blacklist] def _connect(self) -> None: """Connects the device to the data server. Instantiates the device controller from :mod:`zhinst-toolkit`, sets up the data server and connects the device the data server. This method is called from `__init__` of the :class:`BaseInstrument` class. """ self._controller = tk.PQSC( self._name, self._serial, interface=self._interface, host=self._host, port=self._port, api=self._api, ) self._controller.setup() self._controller.connect_device() self.connect_message() self.nodetree_dict = self._controller.nodetree._nodetree_dict self._add_qcodes_params() def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters super()._add_qcodes_params() self.add_parameter( "ref_clock", unit=self._controller.ref_clock._unit, docstring=self._controller.ref_clock.__repr__(), get_cmd=self._controller.ref_clock, set_cmd=self._controller.ref_clock, label="Intended Reference Clock Source", ) self.add_parameter( "ref_clock_actual", unit=self._controller.ref_clock_actual._unit, docstring=self._controller.ref_clock_actual.__repr__(), get_cmd=self._controller.ref_clock_actual, set_cmd=self._controller.ref_clock_actual, label="Actual Reference Clock Source", ) self.add_parameter( "ref_clock_status", unit=self._controller.ref_clock_status._unit, docstring=self._controller.ref_clock_status.__repr__(), get_cmd=self._controller.ref_clock_status, set_cmd=self._controller.ref_clock_status, label="Status Reference Clock", ) self.add_parameter( "repetitions", unit=self._controller.repetitions._unit, docstring=self._controller.repetitions.__repr__(), get_cmd=self._controller.repetitions, set_cmd=self._controller.repetitions, label="Repetitions (Number of Triggers)", ) self.add_parameter( "holdoff", unit=self._controller.holdoff._unit, docstring=self._controller.holdoff.__repr__(), get_cmd=self._controller.holdoff, set_cmd=self._controller.holdoff, label="Hold-off Time Between Triggers", ) self.add_parameter( "progress", unit=self._controller.progress._unit, docstring=self._controller.progress.__repr__(), get_cmd=self._controller.progress, set_cmd=self._controller.progress, label="Fraction of Triggers Generated", ) def factory_reset(self, sync=True) -> None: """Load the factory default settings. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after loading the factory preset (default: True). """ self._controller.factory_reset(sync=sync) def arm(self, sync=True, repetitions: int = None, holdoff: float = None) -> None: """Prepare PQSC for triggering the instruments. This method configures the execution engine of the PQSC and clears the register bank. Optionally, the *number of triggers* and *hold-off time* can be set when specified as keyword arguments. If they are not specified, they are not changed. 
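For example, assuming ``pqsc`` is an instance of this driver (a hypothetical usage sketch, not taken from the upstream documentation):

>>> pqsc.arm(repetitions=100, holdoff=200e-9)  # 100 triggers, 200 ns apart
>>> pqsc.run()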
Note that the PQSC is disabled at the end of the hold-off time after sending out the last trigger. Therefore, the hold-off time should be long enough such that the PQSC is still enabled when the feedback arrives. Otherwise, the feedback cannot be processed. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after stopping the PQSC and clearing the register bank (default: True). repetitions (int): If specified, the number of triggers sent over ZSync ports will be set (default: None). holdoff (double): If specified, the time between repeated triggers sent over ZSync ports will be set. It has a minimum value and a granularity of 100 ns (default: None). """ self._controller.arm(sync=sync, repetitions=repetitions, holdoff=holdoff) def run(self, sync=True) -> None: """Start sending out triggers. This method activates the trigger generation to trigger all connected instruments over ZSync ports. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after enabling the PQSC (default: True). """ self._controller.run(sync=sync) def arm_and_run(self, repetitions: int = None, holdoff: float = None) -> None: """Arm the PQSC and start sending out triggers. Simply combines the methods arm and run. A synchronisation is performed between the device and the data server after arming and running the PQSC. Arguments: repetitions (int): If specified, the number of triggers sent over ZSync ports will be set (default: None). holdoff (double): If specified, the time between repeated triggers sent over ZSync ports will be set. It has a minimum value and a granularity of 100 ns (default: None). """ self._controller.arm_and_run(repetitions=repetitions, holdoff=holdoff) def stop(self, sync=True) -> None: """Stop the trigger generation. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after disabling the PQSC (default: True). """ self._controller.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until trigger generation and feedback processing is done. Arguments: timeout (float): The maximum waiting time in seconds for the PQSC (default: 10). sleep_time (float): Time in seconds to wait between requesting PQSC state Raises: TimeoutError: If the PQSC is not done sending out all triggers and processing feedback before the timeout. """ self._controller.wait_done(timeout=timeout, sleep_time=sleep_time) def check_ref_clock( self, blocking: bool = True, timeout: int = 30, sleep_time: int = 1 ) -> None: """Check if reference clock is locked successfully. Keyword Arguments: blocking (bool): A flag that specifies if the program should be blocked until the reference clock is 'locked'. (default: True) timeout (int): Maximum time in seconds the program waits when `blocking` is set to `True` (default: 30). sleep_time (int): Time in seconds to wait between requesting the reference clock status (default: 1) Raises: ToolkitError: If the device fails to lock on the reference clock. """ self._controller.check_ref_clock( blocking=blocking, timeout=timeout, sleep_time=sleep_time ) def check_zsync_connection(self, ports=0, blocking=True, timeout=30) -> None: """Check if the ZSync connection on the given port is successful. This function checks the current status of the instrument connected to the given port. 
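For example, assuming ``pqsc`` is an instance of this driver (a hypothetical usage sketch):

>>> pqsc.check_zsync_connection(ports=[0, 1], blocking=True, timeout=30)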
Arguments: ports (list) or (int): The port numbers to check the ZSync connection for. It can either be a single port number given as integer or a list of several port numbers. (default: 0) blocking (bool): A flag that specifies if the program should be blocked until the status is 'connected'. (default: False) timeout (int): Maximum time in seconds the program waits when `blocking` is set to `True`. (default: 30) Raises: ToolkitError: If ZSync connection to the instruments on the specified ports is not established. """ self._controller.check_zsync_connection( ports=ports, blocking=blocking, timeout=timeout ) @property def is_running(self): return self._controller.is_running ``` #### File: control/drivers/shfqa.py ```python from .base import ZIBaseInstrument from qcodes.instrument.channel import ChannelList, InstrumentChannel import zhinst.toolkit as tk from zhinst.toolkit.control.drivers.shfqa import ( QAChannel as SHFQA_QAChannel, Generator as SHFQA_Generator, Readout as SHFQA_Readout, Integration as SHFQA_Integration, Sweeper as SHFQA_Sweeper, Scope as SHFQA_Scope, ) from typing import List, Dict, Union import numpy as np class QAChannel(InstrumentChannel): """Device-specific *QAChannel* for the *SHFQA*.""" def __init__(self, name: str, index: int, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._qachannel = SHFQA_QAChannel(parent_contr, index) self._qachannel._init_qachannel_params() self._add_qcodes_qachannel_params() self._init_generator() self._init_readout() self._init_sweeper() def _add_qcodes_qachannel_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "input", docstring=self._qachannel.input.__repr__(), get_cmd=self._qachannel.input, set_cmd=self._qachannel.input, label="Enable Signal Input", ) self.add_parameter( "input_range", unit=self._qachannel.input_range._unit, docstring=self._qachannel.input_range.__repr__(), get_cmd=self._qachannel.input_range, set_cmd=self._qachannel.input_range, label="Maximal Range of the Signal Input Power", ) self.add_parameter( "output", docstring=self._qachannel.output.__repr__(), get_cmd=self._qachannel.output, set_cmd=self._qachannel.output, label="Enable Signal Output", ) self.add_parameter( "output_range", unit=self._qachannel.output_range._unit, docstring=self._qachannel.output_range.__repr__(), get_cmd=self._qachannel.output_range, set_cmd=self._qachannel.output_range, label="Maximal Range of the Signal Output Power", ) self.add_parameter( "center_freq", unit=self._qachannel.center_freq._unit, docstring=self._qachannel.center_freq.__repr__(), get_cmd=self._qachannel.center_freq, set_cmd=self._qachannel.center_freq, label="Center Frequency of the Analysis Band", ) self.add_parameter( "mode", docstring=self._qachannel.mode.__repr__(), get_cmd=self._qachannel.mode, set_cmd=self._qachannel.mode, label="Spectroscopy or Qubit Readout Mode Selection", ) def _init_generator(self): # init submodule for Generator self.add_submodule("generator", Generator("generator", self, self._qachannel)) def _init_readout(self): # init submodule for Readout module self.add_submodule("readout", Readout("readout", self, self._qachannel)) def _init_sweeper(self): # init submodule for Sweeper self.add_submodule("sweeper", Sweeper("sweeper", self, self._qachannel)) class Generator(InstrumentChannel): """Device-specific *Generator* for the *SHFQA*.""" def __init__(self, name: str, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._generator = SHFQA_Generator(parent_contr) 
self._generator._setup() self._generator._init_generator_params() self._add_qcodes_generator_params() def _add_qcodes_generator_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "dig_trigger1_source", docstring=self._generator.dig_trigger1_source.__repr__(), get_cmd=self._generator.dig_trigger1_source, set_cmd=self._generator.dig_trigger1_source, label="Digital Trigger 1 Source", ) self.add_parameter( "dig_trigger2_source", docstring=self._generator.dig_trigger2_source.__repr__(), get_cmd=self._generator.dig_trigger2_source, set_cmd=self._generator.dig_trigger2_source, label="Digital Trigger 2 Source", ) self.add_parameter( "playback_delay", docstring=self._generator.playback_delay.__repr__(), get_cmd=self._generator.playback_delay, set_cmd=self._generator.playback_delay, label="Delay for the Start of Playback", ) self.add_parameter( "single", docstring=self._generator.single.__repr__(), get_cmd=self._generator.single, set_cmd=self._generator.single, label="Single Run", ) def run(self, sync=True) -> None: """Run the generator. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after enabling the generator (default: True). """ self._generator.run(sync=sync) def stop(self, sync=True) -> None: """Stop the generator. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after disabling the generator (default: True). """ self._generator.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until the generator is finished. Arguments: timeout (float): The maximum waiting time in seconds for the generator (default: 10). sleep_time (float): Time in seconds to wait between requesting generator state Raises: ToolkitError: If the generator is running in continuous mode. TimeoutError: If the generator is not finished before the timeout. """ self._generator.wait_done(timeout=timeout, sleep_time=sleep_time) def compile(self) -> None: """Compile the current SequenceProgram and load it to sequencer. Raises: ToolkitConnectionError: If the AWG Core has not been set up yet ToolkitError: if the compilation has failed or the ELF upload is not successful. TimeoutError: if the program upload is not completed before timeout. """ return self._generator.compile() def reset_queue(self) -> None: """Resets the waveform queue to an empty list.""" self._generator.reset_queue() def queue_waveform(self, wave: Union[List, np.array], delay: float = 0) -> None: """Add a new waveform to the queue. Arguments: wave (array): The waveform to be queued as a 1D numpy array. delay (int): An individual delay in seconds for this waveform w.r.t. the time origin of the sequence. (default: 0) Raises: ToolkitError: If the sequence is not of type *'Custom'*. """ self._generator.queue_waveform(wave, delay=delay) def replace_waveform( self, wave: Union[List, np.array], i: int = 0, delay: float = 0, ) -> None: """Replace a waveform in the queue at a given index. Arguments: wave (array): Waveform to replace current wave i (int): The index of the waveform in the queue to be replaced. delay (int): An individual delay in seconds for this waveform w.r.t. the time origin of the sequence. (default: 0) Raises: ValueError: If the given index is out of range. """ self._generator.replace_waveform(wave, i=i, delay=delay) def upload_waveforms(self) -> None: """Upload all waveforms in the queue to the Generator. 
This method only works as expected if the Sequence Program has been compiled beforehand. See :func:`compile_and_upload_waveforms(...)`. """ self._generator.upload_waveforms() def compile_and_upload_waveforms(self) -> None: """Compiles the Sequence Program and uploads the queued waveforms. Simply combines the two methods to make sure the sequence is compiled before the waveform queue is uplaoded. """ self._generator.compile_and_upload_waveforms() def set_sequence_params(self, **kwargs) -> None: """Sets the parameters of the Sequence Program. Passes all the keyword arguments to the `set_param(...)` method of the Sequence Program. The available sequence parameters may vary between different sequences. For a list of all current sequence parameters see the property `sequence_params`. They include *'sequence_type'*, *'period'*, *'repetitions'*, *'trigger_mode'*, *'trigger_delay'*, ... >>> shfqa.qachannels[0].generator.set_sequence_params( >>> sequence_type="Custom", >>> program = seqc_program_string, >>> custom_params = [param1, param2], >>> ) """ self._generator.set_sequence_params(**kwargs) @property def name(self): return self._generator.name @property def waveforms(self): return self._generator.waveforms @property def is_running(self): return self._generator.is_running @property def sequence_params(self): return self._generator.sequence_params class Readout(InstrumentChannel): """Device-specific *Readout* module for the *SHFQA*.""" def __init__(self, name: str, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._readout = SHFQA_Readout(parent_contr) self._readout._init_readout_params() self._add_qcodes_readout_params() self._init_integrations() def _add_qcodes_readout_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "integration_length", docstring=self._readout.integration_length.__repr__(), get_cmd=self._readout.integration_length, set_cmd=self._readout.integration_length, label="Integration Length", ) self.add_parameter( "integration_delay", docstring=self._readout.integration_delay.__repr__(), get_cmd=self._readout.integration_delay, set_cmd=self._readout.integration_delay, label="Integration Delay", ) self.add_parameter( "result_source", docstring=self._readout.result_source.__repr__(), get_cmd=self._readout.result_source, set_cmd=self._readout.result_source, label="Result Source", ) def _init_integrations(self): # init submodules for Integration Units num_integrations = self._readout.device.num_integrations_per_qachannel() channel_list = ChannelList(self, "integrations", Integration) for i in range(num_integrations): channel_list.append(Integration(f"integration-{i}", i, self, self._readout)) channel_list.lock() self.add_submodule("integrations", channel_list) self._readout._init_integrations() @property def is_running(self): return self._readout.is_running def arm(self, sync=True, length: int = None, averages: int = None) -> None: """Prepare SHF device for readout and result acquisition. This method enables the QA Results Acquisition and resets the acquired points. Optionally, the *result length* and *result averages* can be set when specified as keyword arguments. If they are not specified, they are not changed. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after stopping the SHF device and clearing the register bank (default: True). length (int): If specified, the length of the result vector will be set before arming the readout. 
(default: None) averages (int): If specified, the result averages will be set before arming the readout. (default: None) """ self._readout.arm(sync=sync, length=length, averages=averages) def run(self, sync=True) -> None: """Start the result logger. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after starting the result logger (default: True). Raises: ToolkitError: If `sync=True` and the result logger cannot be started """ self._readout.run(sync=sync) def stop(self, sync=True) -> None: """Stop the result logger. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after stopping the result logger (default: True). Raises: ToolkitError: If `sync=True` and the result logger cannot be stopped """ self._readout.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until readout is finished. Arguments: timeout (float): The maximum waiting time in seconds for the Readout (default: 10). sleep_time (float): Time in seconds to wait between requesting Readout state Raises: TimeoutError: if the readout recording is not completed before timeout. """ self._readout.wait_done(timeout=timeout, sleep_time=sleep_time) def read( self, integrations: list = [], blocking: bool = True, timeout: float = 10, sleep_time: float = 0.005, ): """Read out the measured data from the result logger. Arguments: integrations (list): The list of integrations to return the data for. If no integration is specified, the method will return the data for all integrations (default: []). blocking (bool): A flag that specifies if the program should be blocked until the result logger finished recording (default: True). timeout (float): The maximum waiting time in seconds for the Readout (default: 10). sleep_time (float): Time in seconds to wait between requesting Readout state Returns: An array containing the result logger data. Raises: TimeoutError: if the readout recording is not completed before timeout. 
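A hypothetical usage sketch (``shfqa`` denotes an instance of the SHFQA driver defined further below):

>>> result = shfqa.qachannels[0].readout.read(integrations=[0, 1])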
""" return self._readout.read( integrations=integrations, blocking=blocking, timeout=timeout, sleep_time=sleep_time, ) class Integration(InstrumentChannel): """Device-specific *Integration* for the *SHFQA*.""" def __init__(self, name: str, index: int, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._integration = SHFQA_Integration(parent_contr, index) self._integration._init_integration_params() self._add_qcodes_integration_params() def _add_qcodes_integration_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "threshold", unit=self._integration.threshold._unit, docstring=self._integration.threshold.__repr__(), get_cmd=self._integration.threshold, set_cmd=self._integration.threshold, label="Signal Threshold for State Discrimination", ) self.add_parameter( "result", unit=self._integration.result._unit, docstring=self._integration.result.__repr__(), get_cmd=self._integration.result, set_cmd=self._integration.result, label="Result Vector Data", ) self.add_parameter( "weights", unit=self._integration.weights._unit, docstring=self._integration.weights.__repr__(), get_cmd=self._integration.weights, set_cmd=self._integration.weights, label="Complex-valued Waveform of the Integration Weights", ) def set_int_weights(self, weights): return self._integration.set_int_weights(weights) class Sweeper(InstrumentChannel): """Device-specific *Sweeper* for the *SHFQA*.""" def __init__(self, name: str, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._sweeper = SHFQA_Sweeper(parent_contr) self._sweeper._init_sweeper_params() self._add_qcodes_sweeper_params() def _add_qcodes_sweeper_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "oscillator_gain", unit=self._sweeper.oscillator_gain._unit, docstring=self._sweeper.oscillator_gain.__repr__(), get_cmd=self._sweeper.oscillator_gain, set_cmd=self._sweeper.oscillator_gain, label="Gain of Digital Oscillator", ) self.add_parameter( "oscillator_freq", unit=self._sweeper.oscillator_freq._unit, docstring=self._sweeper.oscillator_freq.__repr__(), get_cmd=self._sweeper.oscillator_freq, set_cmd=self._sweeper.oscillator_freq, label="Frequency of Digital Oscillator", ) self.add_parameter( "integration_time", unit=self._sweeper.integration_time._unit, docstring=self._sweeper.integration_time.__repr__(), get_cmd=self._sweeper.integration_time, set_cmd=self._sweeper.integration_time, label="Spectroscopy Integration Time", ) self.add_parameter( "integration_length", unit=self._sweeper.integration_length._unit, docstring=self._sweeper.integration_length.__repr__(), get_cmd=self._sweeper.integration_length, set_cmd=self._sweeper.integration_length, label="Spectroscopy Integration Length", ) self.add_parameter( "integration_delay", unit=self._sweeper.integration_delay._unit, docstring=self._sweeper.integration_delay.__repr__(), get_cmd=self._sweeper.integration_delay, set_cmd=self._sweeper.integration_delay, label="Spectroscopy Integration Delay", ) self.add_parameter( "trigger_source", unit=self._sweeper.trigger_source._unit, docstring=self._sweeper.trigger_source.__repr__(), get_cmd=self._sweeper.trigger_source, set_cmd=self._sweeper.trigger_source, label="Trigger Source for the Sweeper", ) def trigger_level(self, level=None): """Set or get the trigger level for the sweeper. Arguments: level (float): Trigger level of the sweeper (default: None). 
""" return self._sweeper.trigger_level(level=level) def trigger_imp50(self, imp50=None): """Set or get the trigger input impedance setting for the sweeper. Arguments: imp50 (bool): Trigger input impedance selection for the sweeper. When set to True, the trigger input impedance is 50 Ohm. When set to False, it is 1 kOhm (default: None). """ return self._sweeper.trigger_imp50(imp50=imp50) def start_frequency(self, freq=None): """Set or get the start frequency for the sweeper. Arguments: freq (float): Start frequency in Hz of the sweeper (default: None). """ return self._sweeper.start_frequency(freq=freq) def stop_frequency(self, freq=None): """Set or get the stop frequency for the sweeper. Arguments: freq (float): Stop frequency in Hz of the sweeper (default: None). """ return self._sweeper.stop_frequency(freq=freq) def output_freq(self): """Get the output frequency. Returns: The carrier frequency in Hz of the microwave signal at the Out connector. This frequency corresponds to the sum of the Center Frequency and the Offset Frequency. """ return self._sweeper.output_freq() def num_points(self, num=None): """Set or get the number of points for the sweeper. Arguments: num (int): Number of frequency points to sweep between start and stop frequency values (default: None). """ return self._sweeper.num_points(num=num) def mapping(self, map=None): """Set or get the mapping configuration for the sweeper. Arguments: map (str): Mapping that specifies the distances between frequency points of the sweeper. Can be either "linear" or "log" (default: None). """ return self._sweeper.mapping(map=map) def num_averages(self, num=None): """Set or get the number of averages for the sweeper. Number of averages specifies how many times a frequency point will be measured and averaged. Arguments: num (int): Number of times the sweeper measures one frequency point (default: None). """ return self._sweeper.num_averages(num=num) def averaging_mode(self, mode=None): """Set or get the averaging mode for the sweeper. Arguments: mode (str): Averaging mode for the sweeper. Can be either "sequential" or "cyclic" (default: None).\n "sequential": A frequency point is measured the number of times specified by the number of averages setting. In other words, the same frequency point is measured repeatedly until the number of averages is reached and the sweeper then moves to the next frequency point.\n "cyclic": All frequency points are measured once from start frequency to stop frequency. The sweeper then moves back to start frequency and repeats the sweep the number of times specified by the number of averages setting. """ return self._sweeper.averaging_mode(mode=mode) def run(self): """Perform a sweep with the specified settings. This method eventually wraps around the `run` method of `zhinst.utils.shf_sweeper` """ return self._sweeper.run() def read(self): """Get the measurement data of the last sweep. This method eventually wraps around the `get_result` method of `zhinst.utils.shf_sweeper` Returns: A dictionary with measurement data of the last sweep """ return self._sweeper.read() def plot(self): """Plot power over frequency for last sweep. 
This method eventually wraps around the `plot` method of `zhinst.utils.shf_sweeper` """ return self._sweeper.plot() class Scope(InstrumentChannel): """Device-specific *Scope* for the *SHFQA*.""" def __init__(self, name: str, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._scope = SHFQA_Scope(parent_contr) self._scope._init_scope_params() self._add_qcodes_scope_params() def _add_qcodes_scope_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "channel1", docstring=self._scope.channel1.__repr__(), get_cmd=self._scope.channel1, set_cmd=self._scope.channel1, label="Enable Recording for Scope Channel 1", ) self.add_parameter( "channel2", docstring=self._scope.channel2.__repr__(), get_cmd=self._scope.channel2, set_cmd=self._scope.channel2, label="Enable Recording for Scope Channel 2", ) self.add_parameter( "channel3", docstring=self._scope.channel3.__repr__(), get_cmd=self._scope.channel3, set_cmd=self._scope.channel3, label="Enable Recording for Scope Channel 3", ) self.add_parameter( "channel4", docstring=self._scope.channel4.__repr__(), get_cmd=self._scope.channel4, set_cmd=self._scope.channel4, label="Enable Recording for Scope Channel 4", ) self.add_parameter( "input_select1", docstring=self._scope.input_select1.__repr__(), get_cmd=self._scope.input_select1, set_cmd=self._scope.input_select1, label="Select Input Signal for Scope Channel 1", ) self.add_parameter( "input_select2", docstring=self._scope.input_select2.__repr__(), get_cmd=self._scope.input_select2, set_cmd=self._scope.input_select2, label="Select Input Signal for Scope Channel 2", ) self.add_parameter( "input_select3", docstring=self._scope.input_select3.__repr__(), get_cmd=self._scope.input_select3, set_cmd=self._scope.input_select3, label="Select Input Signal for Scope Channel 3", ) self.add_parameter( "input_select4", docstring=self._scope.input_select4.__repr__(), get_cmd=self._scope.input_select4, set_cmd=self._scope.input_select4, label="Select Input Signal for Scope Channel 4", ) self.add_parameter( "trigger_source", docstring=self._scope.trigger_source.__repr__(), get_cmd=self._scope.trigger_source, set_cmd=self._scope.trigger_source, label="Scope Trigger Source", ) self.add_parameter( "trigger_delay", unit=self._scope.trigger_delay._unit, docstring=self._scope.trigger_delay.__repr__(), get_cmd=self._scope.trigger_delay, set_cmd=self._scope.trigger_delay, label="Scope Trigger Delay", ) self.add_parameter( "length", unit=self._scope.length._unit, docstring=self._scope.length.__repr__(), get_cmd=self._scope.length, set_cmd=self._scope.length, label="Length of Scope Shot", ) self.add_parameter( "time", unit=self._scope.time._unit, docstring=self._scope.time.__repr__(), get_cmd=self._scope.time, set_cmd=self._scope.time, label="Time Base of the Scope", ) def run(self, sync=True) -> None: """Run the scope recording. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after starting the scope recording (default: True). """ self._scope.run(sync=sync) def stop(self, sync=True) -> None: """Stop the scope recording. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after stopping scope recording (default: True). """ self._scope.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until the Scope recording is finished. 
Arguments: timeout (int): The maximum waiting time in seconds for the Scope (default: 10). sleep_time (float): Time in seconds to wait between requesting the progress and records values Raises: ToolkitError: If the Scope recording is not done before the timeout. """ self._scope.wait_done(timeout=timeout, sleep_time=sleep_time) def read( self, channel=None, blocking: bool = True, timeout: float = 10, sleep_time: float = 0.005, ): """Read out the recorded data from the specified channel of the scope. Arguments: channel (int): The scope channel to read the data from. If no channel is specified, the method will return the data for all channels. blocking (bool): A flag that specifies if the program should be blocked until the scope has finished recording (default: True). timeout (float): The maximum waiting time in seconds for the Scope (default: 10). sleep_time (float): Time in seconds to wait between requesting the progress and records values Returns: A dictionary showing the recorded data and scope time. Raises: TimeoutError: if the scope recording is not completed before timeout. """ return self._scope.read( channel=channel, blocking=blocking, timeout=timeout, sleep_time=sleep_time ) def channels(self, value=None): """Set all Scope channels simultaneously. Arguments: value (tuple): Tuple of values {'on', 'off'} for channel 1, 2, 3 and 4 (default: None). Returns: A tuple with the states {'on', 'off'} for all input channels. """ return self._scope.channels(value=value) def input_select(self, value=None): """Set all Scope input signals simultaneously. Keyword Arguments: value (tuple): Tuple of values for input signal 1, 2, 3 and 4. The accepted values can be found in SHFQA user manual (default: None). Returns: A tuple with the selected input signal sources for all input channels. """ return self._scope.input_select(value=value) def segments(self, enable=None, count=None): """Configure segmented Scope recording options. Keyword Arguments: enable (bool): a flag that specifies whether segmented Scope recording is enabled (default: None). count (int): number of segments in device memory (default: None) Returns: A dictionary showing the enable state and segment count """ return self._scope.segments(enable=enable, count=count) def averaging(self, enable=None, count=None): """Configure averaging options of Scope measurements. Keyword Arguments: enable (bool): a flag that specifies whether averaging of Scope measurements is enabled (default: None). count (int): number of Scope measurements to average (default: None) Returns: A dictionary showing the enable state and averaging count """ return self._scope.averaging(enable=enable, count=count) @property def is_running(self): return self._scope.is_running class SHFQA(ZIBaseInstrument): """QCoDeS driver for the *Zurich Instruments SHFQA*""" def __init__( self, name: str, serial: str, interface: str = "1gbe", host: str = "localhost", port: int = 8004, api: int = 6, **kwargs, ) -> None: super().__init__(name, "shfqa", serial, interface, host, port, api, **kwargs) submodules = self.nodetree_dict.keys() blacklist = [ "qachannels", "scopes", ] [self._init_submodule(key) for key in submodules if key not in blacklist] def _connect(self) -> None: """Connects the device to the data server. Instantiates the device controller from :mod:`zhinst-toolkit`, sets up the data server and connects the device the data server. This method is called from `__init__` of the :class:`BaseInstrument` class. 
""" self._controller = tk.SHFQA( self._name, self._serial, interface=self._interface, host=self._host, port=self._port, api=self._api, ) self._controller.setup() self._controller.connect_device() self.connect_message() self.nodetree_dict = self._controller.nodetree._nodetree_dict self._init_qachannels() self._init_scope() self._add_qcodes_params() def _init_qachannels(self): # init submodules for QAChannels num_qachannels = self._controller.num_qachannels() channel_list = ChannelList(self, "qachannels", QAChannel) for i in range(num_qachannels): channel_list.append(QAChannel(f"qachannel-{i}", i, self, self._controller)) channel_list.lock() self.add_submodule("qachannels", channel_list) def _init_scope(self): # init submodule Scope self.add_submodule("scope", Scope("scope", self, self._controller)) def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters super()._add_qcodes_params() self.add_parameter( "sw_trigger", unit=self._controller.sw_trigger._unit, docstring=self._controller.sw_trigger.__repr__(), get_cmd=self._controller.sw_trigger, set_cmd=self._controller.sw_trigger, label="Issue a Single Software Trigger Event", ) self.add_parameter( "ref_clock", unit=self._controller.ref_clock._unit, docstring=self._controller.ref_clock.__repr__(), get_cmd=self._controller.ref_clock, set_cmd=self._controller.ref_clock, label="Intended Reference Clock Source", ) self.add_parameter( "ref_clock_actual", unit=self._controller.ref_clock_actual._unit, docstring=self._controller.ref_clock_actual.__repr__(), get_cmd=self._controller.ref_clock_actual, set_cmd=self._controller.ref_clock_actual, label="Actual Reference Clock Source", ) self.add_parameter( "ref_clock_status", unit=self._controller.ref_clock_status._unit, docstring=self._controller.ref_clock_status.__repr__(), get_cmd=self._controller.ref_clock_status, set_cmd=self._controller.ref_clock_status, label="Status Reference Clock", ) def factory_reset(self, sync=True) -> None: """Load the factory default settings. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after loading the factory preset (default: True). """ self._controller.factory_reset(sync=sync) def set_trigger_loopback(self): """Start a trigger pulse using the internal loopback. A 1kHz continuous trigger pulse from marker 1 A using the internal loopback to trigger in 1 A. """ self._controller.set_trigger_loopback() def clear_trigger_loopback(self): """Stop the the internal loopback trigger pulse.""" self._controller.clear_trigger_loopback() def check_ref_clock( self, blocking: bool = True, timeout: int = 30, sleep_time: int = 1 ) -> None: """Check if reference clock is locked successfully. Arguments: blocking (bool): A flag that specifies if the program should be blocked until the reference clock is 'locked'. (default: True) timeout (int): Maximum time in seconds the program waits when `blocking` is set to `True` (default: 30). sleep_time (int): Time in seconds to wait between requesting the reference clock status (default: 1) Raises: ToolkitError: If the device fails to lock on the reference clock. 
""" self._controller.check_ref_clock( blocking=blocking, timeout=timeout, sleep_time=sleep_time ) @property def allowed_sequences(self): return self._controller.allowed_sequences @property def allowed_trigger_modes(self): return self._controller.allowed_trigger_modes ``` #### File: control/drivers/shfsg.py ```python from .base import ZIBaseInstrument import zhinst.toolkit as tk from qcodes.instrument.channel import ChannelList, InstrumentChannel from typing import List, Dict, Union import numpy as np class SGChannel(InstrumentChannel): """SGChannel for the SHFSG Inherits from :class:`InstrumentChannel` and wraps around a `SG Channel` for *SHFSG* from :mod:`zhinst-toolkit`. This class adds *Parameters* from the :mod:`zhinst-toolkit` as *QCoDeS Parameters* and wraps all methods of the *toolkit's* `SGChannel`. Arguments: name (str): The name of the `AWG` submodule. parent_instr (:class:`qcodes.instrument.base.Instrument`): The QCoDeS parent instrument of the `InstrumentChannel`. parent_contr (:class:`zhinst.toolkit.BaseInstrument`): The `_controller` of the parent instrument that is used for getting and setting parameters. Attributes: output (:class:`Parameter`): State of the output, i.e. one of {'on', 'off'}. output_range (:class:`Parameter`): State of the output 1, i.e. one of {'on', 'off'}. rf_center_freq (:class:`Parameter`): State of the output 1, i.e. one of {'on', 'off'}. rf_or_lf_path (:class:`Parameter`): State of the output 1, i.e. one of {'on', 'off'}. """ def __init__(self, name: str, index: int, parent_instr, parent_contr) -> None: InstrumentChannel.__init__(self, parent_instr, name) self._sgchannel = parent_contr.sgchannels[index] self.add_submodule("awg", AWG("AWG", parent_instr, self)) self.add_submodule("sine", Sine("Sine", parent_instr, self)) self._add_qcodes_params() def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "output", unit=self._sgchannel.output._unit, docstring=self._sgchannel.output.__repr__(), get_cmd=self._sgchannel.output, set_cmd=self._sgchannel.output, label="Output", ) self.add_parameter( "output_range", unit=self._sgchannel.output_range._unit, docstring=self._sgchannel.output_range.__repr__(), get_cmd=self._sgchannel.output_range, set_cmd=self._sgchannel.output_range, label="Output Range", ) self.add_parameter( "rf_center_freq", unit=self._sgchannel.rf_center_freq._unit, docstring=self._sgchannel.rf_center_freq.__repr__(), get_cmd=self._sgchannel.rf_center_freq, set_cmd=self._sgchannel.rf_center_freq, label="RF Center Frequency", ) # self.add_parameter( # "digital_mixer_center_freq", # unit=self._sgchannel.digital_mixer_center_freq._unit, # docstring=self._sgchannel.digital_mixer_center_freq.__repr__(), # get_cmd=self._sgchannel.digital_mixer_center_freq, # set_cmd=self._sgchannel.digital_mixer_center_freq, # label="Digital Mixer Center Frequency", # ) self.add_parameter( "rf_or_lf_path", unit=self._sgchannel.rf_or_lf_path._unit, docstring=self._sgchannel.rf_or_lf_path.__repr__(), get_cmd=self._sgchannel.rf_or_lf_path, set_cmd=self._sgchannel.rf_or_lf_path, label="RF or LF Path", ) class AWG(InstrumentChannel): """AWG Core of a SGChannel for the SHFSG Inherits from :class:`InstrumentChannel` and wraps around a `AWGCore` for *SHFSG* from :mod:`zhinst-toolkit`. This class adds *Parameters* from the :mod:`zhinst-toolkit` as *QCoDeS Parameters* and wraps all methods of the *toolkit's* `AWGCore`. Arguments: name (str): The name of the `AWG` submodule. 
parent_instr (:class:`qcodes.instrument.base.Instrument`): The QCoDeS parent instrument of the `InstrumentChannel`. parent_contr (:class:`zhinst.toolkit.BaseInstrument`): The `_controller` of the parent instrument that is used for getting and setting parameters. Attributes: output1 (:class:`Parameter`): State of the output 1 (readonly), i.e. one of {'on', 'off'}. output2 (:class:`Parameter`): State of the output 2 (readonly), i.e. one of {'on', 'off'}. modulation_freq (:class:`Parameter`): Frequency of the modulation in Hz if IQ modulation is enabled. modulation_phase_shift (:class:`Parameter`): Phase shift in degrees between I and Q quadratures if IQ modulation is enabled (default: 90). gain00 (:class:`Parameter`): Gain of the output channel 1 of the I channel. Must be between -1 and +1 (default: +1). gain01 (:class:`Parameter`): Gain of the output channel 2 of the I channel. Must be between -1 and +1 (default: +1). gain10 (:class:`Parameter`): Gain of the output channel 1 of the Q channel. Must be between -1 and +1 (default: +1). gain11 (:class:`Parameter`): Gain of the output channel 2 of the Q channel. Must be between -1 and +1 (default: +1). single (:class:`Parameter`): Enable or Disable Single Run of the AWG. digital_trigger1_source (:class:`Parameter`): Digital Trigger 1 Source. digital_trigger2_source (:class:`Parameter`): Digital Trigger 2 Source. digital_trigger1_slope (:class:`Parameter`): Digital Trigger 1 Slope. digital_trigger2_slope (:class:`Parameter`): Digital Trigger 2 Slope. osc_select (:class:`Parameter`): Select Oscillator. waveforms (list): A list of `Waveforms` that respresent the queue of waveforms to upload to the device when the sequence type is *'Simple'*. is_running (bool): A flag that shows if the `AWG Core` is currently running or not. index (int): The index of the `AWG Core` in the list of *awgs*. 
""" def __init__(self, name: str, parent_instr, parent) -> None: InstrumentChannel.__init__(self, parent_instr, name) self._awg = parent._sgchannel.awg self._add_qcodes_params() def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "output1", unit=self._awg.output1._unit, docstring=self._awg.output1.__repr__(), get_cmd=self._awg.output1, label="AWG Output Ch 1 (readonly)", ) self.add_parameter( "output2", unit=self._awg.output2._unit, docstring=self._awg.output2.__repr__(), get_cmd=self._awg.output2, label="AWG Output Ch 2 (readonly)", ) self.add_parameter( "modulation_freq", unit=self._awg.modulation_freq._unit, docstring=self._awg.modulation_freq.__repr__(), get_cmd=self._awg.modulation_freq, label="Modulation Frequency", ) self.add_parameter( "modulation_phase_shift", unit=self._awg.modulation_phase_shift._unit, docstring=self._awg.modulation_phase_shift.__repr__(), get_cmd=self._awg.modulation_phase_shift, label="Modulation Phase Shift", ) self.add_parameter( "gain00", unit=self._awg.gain00._unit, docstring=self._awg.gain00.__repr__(), get_cmd=self._awg.gain00, set_cmd=self._awg.gain00, label="Output 0 Gain 0", ) self.add_parameter( "gain01", unit=self._awg.gain01._unit, docstring=self._awg.gain01.__repr__(), get_cmd=self._awg.gain01, set_cmd=self._awg.gain01, label="Output 0 Gain 1", ) self.add_parameter( "gain10", unit=self._awg.gain10._unit, docstring=self._awg.gain10.__repr__(), get_cmd=self._awg.gain10, set_cmd=self._awg.gain10, label="Output 1 Gain 0", ) self.add_parameter( "gain11", unit=self._awg.gain11._unit, docstring=self._awg.gain11.__repr__(), get_cmd=self._awg.gain11, set_cmd=self._awg.gain11, label="Output 1 Gain 1", ) self.add_parameter( "single", unit=self._awg.single._unit, docstring=self._awg.single.__repr__(), get_cmd=self._awg.single, set_cmd=self._awg.single, label="Single Run", ) self.add_parameter( "digital_trigger1_source", unit=self._awg.digital_trigger1_source._unit, docstring=self._awg.digital_trigger1_source.__repr__(), get_cmd=self._awg.digital_trigger1_source, set_cmd=self._awg.digital_trigger1_source, label="Digital Trigger 1 Source", ) self.add_parameter( "digital_trigger2_source", unit=self._awg.digital_trigger2_source._unit, docstring=self._awg.digital_trigger2_source.__repr__(), get_cmd=self._awg.digital_trigger2_source, set_cmd=self._awg.digital_trigger2_source, label="Digital Trigger 2 Source", ) self.add_parameter( "digital_trigger1_slope", unit=self._awg.digital_trigger1_slope._unit, docstring=self._awg.digital_trigger1_slope.__repr__(), get_cmd=self._awg.digital_trigger1_slope, set_cmd=self._awg.digital_trigger1_slope, label="Digital Trigger 1 Slope", ) self.add_parameter( "digital_trigger2_slope", unit=self._awg.digital_trigger2_slope._unit, docstring=self._awg.digital_trigger2_slope.__repr__(), get_cmd=self._awg.digital_trigger2_slope, set_cmd=self._awg.digital_trigger2_slope, label="Digital Trigger 2 Slope", ) self.add_parameter( "osc_select", unit=self._awg.osc_select._unit, docstring=self._awg.osc_select.__repr__(), get_cmd=self._awg.osc_select, set_cmd=self._awg.osc_select, label="Selected Oscillator", ) def load_ct(self, table): """Load a given command table to the instrument""" self._awg.ct.load(table) def enable_iq_modulation(self) -> None: """Enables IQ Modulation by on the *AWG Core*. This method applies the corresponding settings for IQ modulation using one of the internal oscillators and two sine generators. The sines are used to modulate the AWG output channels. 
The *parameters* `modulation_freq`, `modulation_phase_shift` and `gain1`, `gain2` correspond to the settings of the oscillator and the sine generators. """ self._awg.enable_iq_modulation() def disable_iq_modulation(self) -> None: """Disables IQ modulation on the *AWG Core*. Resets the settings of the sine generators and the AWG modulation. """ self._awg.disable_iq_modulation() def run(self, sync=True) -> None: """Run the AWG Core. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after enabling the AWG Core (default: True). """ self._awg.run(sync=sync) def stop(self, sync=True) -> None: """Stop the AWG Core. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after disabling the AWG Core (default: True). """ self._awg.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until the AWG Core is finished. Arguments: timeout (float): The maximum waiting time in seconds for the AWG Core (default: 10). sleep_time (float): Time in seconds to wait between requesting AWG state Raises: ToolkitError: If the AWG is running in continuous mode. TimeoutError: If the AWG is not finished before the timeout. """ self._awg.wait_done(timeout=timeout, sleep_time=sleep_time) def compile(self) -> None: """Compiles the current SequenceProgram on the AWG Core. Raises: ToolkitConnectionError: If the AWG Core has not been set up yet ToolkitError: if the compilation has failed or the ELF upload is not successful. TimeoutError: if the program upload is not completed before timeout. """ self._awg.compile() def reset_queue(self) -> None: """Resets the waveform queue to an empty list.""" self._awg.reset_queue() def queue_waveform( self, wave1: Union[List, np.array], wave2: Union[List, np.array], delay: float = 0, ) -> None: """Queues up a waveform to the *AWG Core*. Uploading custom waveforms is only possible when using the *'Simple'* or *'Custom'* sequence types. The waveform is specified with two numpy arrays for the two channels of the *AWG Core*. The waveform will then automatically align them to the correct minimum waveform length, sample granularity and scaling. An individual delay can be specified to shift the individual waveform with respect to the time origin of the period. Arguments: wave1 (array like): A list or array of samples in the waveform to be queued for channel 1. An empty list '[]' will upload zeros of the minimum waveform length. wave2 (array like): A list or array of samples in the waveform to be queued for channel 2. An empty list '[]' will upload zeros of the minimum waveform length. delay (float): An individual delay for the queued sequence with respect to the time origin. Positive values shift the start of the waveform forwards in time. (default: 0) Raises: ToolkitError: If the sequence is not of type *'Simple'* or *'Custom'*. """ self._awg.queue_waveform(wave1, wave2, delay=delay) def replace_waveform( self, wave1: Union[List, np.array], wave2: Union[List, np.array], i: int = 0, delay: float = 0, ) -> None: """Replaces the data in a waveform in the queue. The new data must have the same length as the previous data s.t. the waveform data can be replaced without recompilation of the sequence program. Arguments: wave1 (array): Waveform to replace current wave for Channel 1. wave2 (array): Waveform to replace current wave for Channel 2. i (int): The index of the waveform in the queue to be replaced. 
delay (int): An individual delay in seconds for this waveform w.r.t. the time origin of the sequence (default: 0). Raises: ValueError: If the given index is out of range. """ self._awg.replace_waveform(wave1, wave2, i=i, delay=delay) def upload_waveforms(self) -> None: """Uploads all waveforms in the queue to the AWG Core. This method only works as expected if the Sequence Program is in 'Simple' or 'Custom' modes and has been compiled beforehand. See :func:`compile_and_upload_waveforms(...)`. """ self._awg.upload_waveforms() def compile_and_upload_waveforms(self) -> None: """Compiles the Sequence Program and uploads the queued waveforms. Simply combines the two methods to make sure the sequence is compiled before the waveform queue is uplaoded. """ self._awg.compile_and_upload_waveforms() def set_sequence_params(self, **kwargs) -> None: """Sets the parameters of the *Sequence Program*. Passes all the keyword arguments to the `set_param(...)` method of the *Sequence Program*. The available sequence parameters may vary between different sequences. For a list of all current sequence parameters see the method `sequence_params()`. They include: *'sequence_type', 'period', 'repetitions', 'trigger_mode', 'trigger_delay', ...* >>> shfsg.awgs[0].set_sequence_params( >>> sequence_type="Simple", >>> trigger_mode="Send Trigger", >>> repetitions=1e6, >>> alignemnt="Start with Trigger" >>> ) """ self._awg.set_sequence_params(**kwargs) def sequence_params(self) -> Dict: """Returns the current sequence parameters. Returns: A dictionary with the current sequence parameters. """ return self._awg.sequence_params @property def waveforms(self): return self._awg.waveforms @property def is_running(self): return self._awg.is_running @property def index(self): return self._awg.index class Sine(InstrumentChannel): """Sine of a SGChannel for the SHFSG Inherits from :class:`InstrumentChannel` and wraps around a `Sine` for *SHFSG* from :mod:`zhinst-toolkit`. This class adds *Parameters* from the :mod:`zhinst-toolkit` as *QCoDeS Parameters* and wraps all methods of the *toolkit's* `Sine`. Arguments: name (str): The name of the `AWG` submodule. parent_instr (:class:`qcodes.instrument.base.Instrument`): The QCoDeS parent instrument of the `InstrumentChannel`. parent_contr (:class:`zhinst.toolkit.BaseInstrument`): The `_controller` of the parent instrument that is used for getting and setting parameters. Attributes: osc_select (:class:`Parameter`): Select Oscillator. harmonic (:class:`Parameter`): Harmonic. phaseshift (:class:`Parameter`): Phaseshift. i_enable (:class:`Parameter`): Enable I Channel. i_sin (:class:`Parameter`): Amplitude of Sine in I Channel i_cos (:class:`Parameter`): Amplitude of Cosine in I Channel q_enable (:class:`Parameter`): Enable Q Channel. 
q_sin (:class:`Parameter`): Amplitude of Sine in Q Channel q_cos (:class:`Parameter`): Amplitude of Cosine in Q Channel """ def __init__(self, name: str, parent_instr, parent) -> None: InstrumentChannel.__init__(self, parent_instr, name) self._sine = parent._sgchannel.sine self._add_qcodes_params() def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "osc_select", unit=self._sine.osc_select._unit, docstring=self._sine.osc_select.__repr__(), get_cmd=self._sine.osc_select, set_cmd=self._sine.osc_select, label="Selected Oscillator", ) self.add_parameter( "harmonic", unit=self._sine.harmonic._unit, docstring=self._sine.harmonic.__repr__(), get_cmd=self._sine.harmonic, set_cmd=self._sine.harmonic, label="Harmonic", ) # self.add_parameter( # "freq", # unit=self._sine.freq._unit, # docstring=self._sine.freq.__repr__(), # get_cmd=self._sine.freq, # set_cmd=self._sine.freq, # label="Frequency", # ) self.add_parameter( "phaseshift", unit=self._sine.phaseshift._unit, docstring=self._sine.phaseshift.__repr__(), get_cmd=self._sine.phaseshift, set_cmd=self._sine.phaseshift, label="Phaseshift", ) self.add_parameter( "i_enable", unit=self._sine.i_enable._unit, docstring=self._sine.i_enable.__repr__(), get_cmd=self._sine.i_enable, set_cmd=self._sine.i_enable, label="Enable I Channel", ) self.add_parameter( "i_sin", unit=self._sine.i_sin._unit, docstring=self._sine.i_sin.__repr__(), get_cmd=self._sine.i_sin, set_cmd=self._sine.i_sin, label="Amplitude of Sine in I Channel", ) self.add_parameter( "i_cos", unit=self._sine.i_cos._unit, docstring=self._sine.i_cos.__repr__(), get_cmd=self._sine.i_cos, set_cmd=self._sine.i_cos, label="Amplitude of Cosine in I Channel", ) self.add_parameter( "q_enable", unit=self._sine.q_enable._unit, docstring=self._sine.q_enable.__repr__(), get_cmd=self._sine.q_enable, set_cmd=self._sine.q_enable, label="Enable Q Channel", ) self.add_parameter( "q_sin", unit=self._sine.q_sin._unit, docstring=self._sine.q_sin.__repr__(), get_cmd=self._sine.q_sin, set_cmd=self._sine.q_sin, label="Amplitude of Sine in Q Channel", ) self.add_parameter( "q_cos", unit=self._sine.q_cos._unit, docstring=self._sine.q_cos.__repr__(), get_cmd=self._sine.q_cos, set_cmd=self._sine.q_cos, label="Amplitude of Cosine in Q Channel", ) class SHFSG(ZIBaseInstrument): """QCoDeS driver for the *Zurich Instruments SHFSG*. Inherits from :class:`ZIBaseInstrument`. Initializes some *submodules* from the device's nodetree and a :class:`ChannelList` of device-specific `SGChannels` for high-level control of the *signal output* and *AWG Cores*. Arguments: name (str): The internal QCoDeS name of the instrument. serial (str): The device serial number, e.g. *'dev1234'*. interface (str): The interface used to connect to the device. (default: '1gbe') host (str): Address of the data server. (default: 'localhost') port (int): Port used to connect to the data server. (default: 8004) api (int): Api level used for the data server. (default: 6) Attributes: sgchannel (:class:`ChannelList`): A list of four/eight *SHFSG* specific *SGChannels* (:class:`zhinst.qcodes.shfsg.sgchannel`). 
""" def __init__( self, name: str, serial: str, interface: str = "1gbe", host: str = "localhost", port: int = 8004, api: int = 6, **kwargs, ) -> None: super().__init__(name, "shfsg", serial, interface, host, port, api, **kwargs) submodules = self.nodetree_dict.keys() # initialize submodules from nodetree with blacklist blacklist = ["sgchannels"] [self._init_submodule(key) for key in submodules if key not in blacklist] def _connect(self) -> None: """Connects the device to the data server. Instantiates the device controller from :mod:`zhinst-toolkit`, sets up the data server and connects the device the data server. This method is called from `__init__` of the :class:`BaseInstrument` class. """ self._controller = tk.SHFSG( self._name, self._serial, interface=self._interface, host=self._host, port=self._port, api=self._api, ) self._controller.setup() self._controller.connect_device() self.connect_message() self.nodetree_dict = self._controller.nodetree._nodetree_dict self._init_sg_channels() self._add_qcodes_params() def _init_sg_channels(self): """initialize ChannelList of SGChannels""" channel_list = ChannelList(self, "sgchannels", SGChannel) for i in range(self._controller.num_sgchannels()): channel_list.append(SGChannel(f"sgchannel-{i}", i, self, self._controller)) channel_list.lock() self.add_submodule("sgchannels", channel_list) def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters super()._add_qcodes_params() self.add_parameter( "ref_clock", unit=self._controller.ref_clock._unit, docstring=self._controller.ref_clock.__repr__(), get_cmd=self._controller.ref_clock, set_cmd=self._controller.ref_clock, label="Intended Reference Clock Source", ) self.add_parameter( "ref_clock_status", unit=self._controller.ref_clock_status._unit, docstring=self._controller.ref_clock_status.__repr__(), get_cmd=self._controller.ref_clock_status, set_cmd=self._controller.ref_clock_status, label="Status Reference Clock", ) def factory_reset(self, sync=True) -> None: """Load the factory default settings. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after loading the factory preset (default: True). """ self._controller.factory_reset(sync=sync) def enable_qccs_mode(self) -> None: """Configure the instrument to work with PQSC This method sets the reference clock source and DIO settings correctly to connect the instrument to the PQSC. """ self._controller.enable_qccs_mode() def enable_manual_mode(self) -> None: """Disconnect from PQSC This method sets the reference clock source and DIO settings to factory default states and the instrument is disconnected from the PQSC. """ self._controller.enable_manual_mode() @property def allowed_sequences(self): return self._controller.allowed_sequences @property def allowed_trigger_modes(self): return self._controller.allowed_trigger_modes ``` #### File: control/drivers/uhfqa.py ```python from .base import ZIBaseInstrument from qcodes.instrument.channel import ChannelList, InstrumentChannel import qcodes.utils.validators as vals import zhinst.toolkit as tk from zhinst.toolkit.control.drivers.uhfqa import ( ReadoutChannel, AWG as UHFQA_AWG, UHFScope as UHFQA_Scope, ) from typing import List, Dict, Union import numpy as np class AWG(InstrumentChannel): """Device-specific *AWG Core* for the *UHFQA*. Inherits from :class:`InstrumentChannel` and wraps around a `AWGCore` for *HDAWG* from :mod:`zhinst-toolkit`. 
This class adds Parameters from the :mod:`zhinst-toolkit` as *QCoDeS Parameters* and wraps all methods of the *toolkit's* `AWGCore`. Arguments: name (str): The name of the `AWG` submodule. parent_instr (:class:`qcodes.instrument.base.Instrument`): The QCoDeS parent instrument of the `InstrumentChannel`. parent_contr (:class:`zhinst.toolkit.BaseInstrument`): The `_controller` of the parent instrument that is used for getting and setting parameters. Attributes: output1 (:class:`Parameter`): The state of the output of channel 1. Can be one of {'on', 'off'}. output2 (:class:`Parameter`): The state of the output of channel 2. Can be one of {'on', 'off'}. gain1 (:class:`Parameter`): Gain of the output channel 1. The value must be between -1 and +1 (default: +1). gain2 (:class:`Parameter`): Gain of the output channel 2. The value must be between -1 and +1 (default: +1). waveforms (list): A list of `Waveforms` that respresent the queue of waveforms to upload to the device when the sequence type is *'Simple'*. is_running (bool): A flag that shows if the `AWG Core` is currently running or not. index (int): The index of the `AWG Core` in the list of *awgs*. """ def __init__(self, name: str, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._awg = UHFQA_AWG(parent_contr, 0) self._awg._setup() self._awg._init_awg_params() self._add_qcodes_awg_params() def _add_qcodes_awg_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "output1", unit=self._awg.output1._unit, docstring=self._awg.output1.__repr__(), get_cmd=self._awg.output1, set_cmd=self._awg.output1, label="Output Ch 1", ) self.add_parameter( "output2", unit=self._awg.output2._unit, docstring=self._awg.output2.__repr__(), get_cmd=self._awg.output2, set_cmd=self._awg.output2, label="Output Ch 2", ) self.add_parameter( "gain1", unit=self._awg.gain1._unit, docstring=self._awg.gain1.__repr__(), get_cmd=self._awg.gain1, set_cmd=self._awg.gain1, label="Gain Ch 1", ) self.add_parameter( "gain2", unit=self._awg.gain2._unit, docstring=self._awg.gain2.__repr__(), get_cmd=self._awg.gain2, set_cmd=self._awg.gain2, label="Gain Ch 2", ) self.add_parameter( "single", unit=self._awg.single._unit, docstring=self._awg.single.__repr__(), get_cmd=self._awg.single, set_cmd=self._awg.single, label="AWG Single Shot Mode", ) def outputs(self, value=None): """Sets both signal outputs simultaneously. Arguments: value (tuple): Tuple of values {'on', 'off'} for channel 1 and 2 (default: None). Returns: A tuple with the states {'on', 'off'} for the two output channels if the keyword argument is not given. Raises: ValueError: If the `value` argument is not a list or tuple of length 2. """ return self._awg.outputs(value=value) def run(self, sync=True) -> None: """Run the AWG Core. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after enabling the AWG Core (default: True). """ self._awg.run(sync=sync) def stop(self, sync=True) -> None: """Stop the AWG Core. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after disabling the AWG Core (default: True). """ self._awg.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until the AWG Core is finished. Arguments: timeout (float): The maximum waiting time in seconds for the AWG Core (default: 10). 
sleep_time (float): Time in seconds to wait between requesting AWG state Raises: ToolkitError: If the AWG is running in continuous mode. TimeoutError: If the AWG is not finished before the timeout. """ self._awg.wait_done(timeout=timeout, sleep_time=sleep_time) def compile(self) -> None: """Compiles the current SequenceProgram on the AWG Core. Raises: ToolkitConnectionError: If the AWG Core has not been set up yet ToolkitError: if the compilation has failed or the ELF upload is not successful. TimeoutError: if the program upload is not completed before timeout. """ self._awg.compile() def reset_queue(self) -> None: """Resets the waveform queue to an empty list.""" self._awg.reset_queue() def queue_waveform( self, wave1: Union[List, np.array], wave2: Union[List, np.array], delay: float = 0, ) -> None: """Queues up a waveform to the *AWG Core*. Uploading custom waveforms is only possible when using the *'Simple'* or *'Custom'* sequence types. The waveform is specified with two numpy arrays for the two channels of the *AWG Core*. The waveform will then automatically align them to the correct minimum waveform length, sample granularity and scaling. An individual delay can be specified to shift the individual waveform with respect to the time origin of the period. Arguments: wave1 (array like): A list or array of samples in the waveform to be queued for channel 1. An empty list '[]' will upload zeros of the minimum waveform length. wave2 (array like): A list or array of samples in the waveform to be queued for channel 2. An empty list '[]' will upload zeros of the minimum waveform length. delay (float): An individual delay for the queued sequence with respect to the time origin. Positive values shift the start of the waveform forwards in time. (default: 0) Raises: ToolkitError: If the sequence is not of type *'Simple'* or *'Custom'*. """ self._awg.queue_waveform(wave1, wave2, delay=delay) def replace_waveform( self, wave1: Union[List, np.array], wave2: Union[List, np.array], i: int = 0, delay: float = 0, ) -> None: """Replaces the data in a waveform in the queue. The new data must have the same length as the previous data s.t. the waveform data can be replaced without recompilation of the sequence program. Arguments: wave1 (array): Waveform to replace current wave for Channel 1. wave2 (array): Waveform to replace current wave for Channel 2. i (int): The index of the waveform in the queue to be replaced. delay (int): An individual delay in seconds for this waveform w.r.t. the time origin of the sequence (default: 0). Raises: ValueError: If the given index is out of range. """ self._awg.replace_waveform(wave1, wave2, i=i, delay=delay) def upload_waveforms(self) -> None: """Uploads all waveforms in the queue to the AWG Core. This method only works as expected if the Sequence Program is in 'Simple' or 'Custom' modes and has been compiled beforehand. See :func:`compile_and_upload_waveforms(...)`. """ self._awg.upload_waveforms() def compile_and_upload_waveforms(self) -> None: """Compiles the Sequence Program and uploads the queued waveforms. Simply combines the two methods to make sure the sequence is compiled before the waveform queue is uplaoded. """ self._awg.compile_and_upload_waveforms() def set_sequence_params(self, **kwargs) -> None: """Sets the parameters of the *Sequence Program*. Passes all the keyword arguments to the `set_param(...)` method of the *Sequence Program*. The available sequence parameters may vary between different sequences. 
For a list of all current sequence parameters see the method `sequence_params()`. They include: *'sequence_type', 'period', 'repetitions', 'trigger_mode', 'trigger_delay', ...* >>> hdawg.awgs[0].set_sequence_params( >>> sequence_type="Simple", >>> trigger_mode="Send Trigger", >>> repetitions=1e6, >>> alignemnt="Start with Trigger" >>> ) """ self._awg.set_sequence_params(**kwargs) def sequence_params(self) -> None: """Returns the current seuence parameters. Returns: A dictionary with the current sequence parameters. """ return self._awg.sequence_params @property def waveforms(self): return self._awg.waveforms @property def is_running(self): return self._awg.is_running @property def index(self): return self._awg.index class Channel(InstrumentChannel): """Implements a *Readout Channel* for the *UHFQA*. Inherits from :class:`InstrumentChannel` and wraps around a `ReadoutChannel` for *UHFQA* from :mod:`zhinst-toolkit`. This class adds *Parameters* from the :mod:`zhinst-toolkit` as *QCoDeS Parameters* and wraps all methods of the *toolkit's* `ReadoutChannel`. This class represents the signal processing chain for one of the ten :class:`ReadoutChannels` of a UHFQA. One channel is typically used for dispersive resonator readout of a superconducting Qubit. >>> ch = uhfqa.channels[0] >>> uhfqa.result_source("Threshold") >>> ... >>> ch.enable() >>> ch.readout_frequency(85.6e6) >>> ch.rotation(123.4) >>> ch.threshold(-56.78) >>> ... >>> ch.result() array([0.0, 1.0, 1.0, 1.0, 0.0, ...]) The readout channel can be enabled with `enable()` which means that the weighted integration mode is activated and integration weights are set to demodulate the signal at the given readout frequency. If the channel is enabled, the readout parameters are also used for signal generation in the :class:`AWGCore` if the sequence type is set to "Readout". Arguments: name (str): The name of the `Channel` submodule. parent_instr (:class:`qcodes.instrument.base.Instrument`): The QCoDeS parent instrument of the `InstrumentChannel`. parent_contr (:class:`zhinst.toolkit.BaseInstrument`): The `_controller` of the parent instrument that is used for getting and setting parameters. Attributes: index (int): The index of the Readout Channel from 1 - 10. rotation (:class:`Parameter`): The rotation applied to the signal in IQ plane. The angle is specified in degrees. threshold (:class:`Parameter`): The signal threshold used for state discrimination in the thresholding unit. result (:class:`Parameter`): This read-only Parameter holds the result vector for the given readout channel as a 1D numpy array. """ def __init__(self, name: str, index: int, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._channel = ReadoutChannel(parent_contr, index) self._channel._init_channel_params() self._add_qcodes_channel_params() def _add_qcodes_channel_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "rotation", unit=self._channel.rotation._unit, docstring=self._channel.rotation.__repr__(), get_cmd=self._channel.rotation, set_cmd=self._channel.rotation, label="Rotation", ) self.add_parameter( "threshold", unit=self._channel.threshold._unit, docstring=self._channel.threshold.__repr__(), get_cmd=self._channel.threshold, set_cmd=self._channel.threshold, label="Threshold", vals=vals.Numbers(), ) self.add_parameter( "readout_frequency", unit="Hz", docstring="Readout frequency of the channel. 
Is used to create a readout " "tone and to set the integration weights.", get_cmd=self._channel.readout_frequency, set_cmd=self._channel.readout_frequency, label="Readout Frequency", vals=vals.Numbers(), ) self.add_parameter( "int_weights_envelope", docstring="Envelope values multiplied with the integration weights.", get_cmd=self._channel.int_weights_envelope, set_cmd=self._channel.int_weights_envelope, label="Integration Weights Envelope", ) self.add_parameter( "readout_amplitude", unit="Hz", docstring="The amplitude of the readout tone associated with this channel. " "Used in a 'Readout' sequence.", get_cmd=self._channel.readout_amplitude, set_cmd=self._channel.readout_amplitude, label="Readout Amplitude", vals=vals.Numbers(), ) self.add_parameter( "phase_shift", unit="Hz", docstring="The phase shift of the readout tone associated with this " "channel. Used in a 'Readout' sequence.", get_cmd=self._channel.phase_shift, set_cmd=self._channel.phase_shift, label="Readout Phase Shift", vals=vals.Numbers(), ) self.add_parameter( "result", unit=self._channel.result._unit, docstring=self._channel.result.__repr__(), get_cmd=self._channel.result, label="Result", ) def enabled(self) -> None: """Returns if weighted integration is enabled.""" return self._channel.enabled() def enable(self) -> None: """Enable weighted integration for this channel. This method also sets the corresponding integration weights to demodulate at the given readout frequency. """ self._channel.enable() def disable(self) -> None: """Disable weighted integration for this channel. This method also resets the corresponding integration weights. """ self._channel.disable() @property def index(self): return self._channel.index class Scope(InstrumentChannel): """Device-specific *Scope* for the *UHFQA*.""" def __init__(self, name: str, parent_instr, parent_contr) -> None: super().__init__(parent_instr, name) self._scope = UHFQA_Scope(parent_contr) self._scope._setup() self._scope._init_scope_params() self._scope._init_scope_settings() self._add_qcodes_scope_params() def _add_qcodes_scope_params(self): # add custom parameters as QCoDeS parameters self.add_parameter( "single", unit=self._scope.single._unit, docstring=self._scope.single.__repr__(), get_cmd=self._scope.single, set_cmd=self._scope.single, label="Scope Single Shot Mode", ) self.add_parameter( "length", unit=self._scope.length._unit, docstring=self._scope.length.__repr__(), get_cmd=self._scope.length, set_cmd=self._scope.length, label="Length of Scope Shot", ) self.add_parameter( "trigger_source", unit=self._scope.trigger_source._unit, docstring=self._scope.trigger_source.__repr__(), get_cmd=self._scope.trigger_source, set_cmd=self._scope.trigger_source, label="Scope Trigger Source", ) self.add_parameter( "trigger_level", unit=self._scope.trigger_level._unit, docstring=self._scope.trigger_level.__repr__(), get_cmd=self._scope.trigger_level, set_cmd=self._scope.trigger_level, label="Scope Trigger Level", ) self.add_parameter( "trigger_enable", unit=self._scope.trigger_enable._unit, docstring=self._scope.trigger_enable.__repr__(), get_cmd=self._scope.trigger_enable, set_cmd=self._scope.trigger_enable, label="Enable Triggered Scope Shot", ) self.add_parameter( "trigger_reference", unit=self._scope.trigger_reference._unit, docstring=self._scope.trigger_reference.__repr__(), get_cmd=self._scope.trigger_reference, set_cmd=self._scope.trigger_reference, label="Trigger Reference Position", ) self.add_parameter( "trigger_holdoff", unit=self._scope.trigger_holdoff._unit, 
docstring=self._scope.trigger_holdoff.__repr__(), get_cmd=self._scope.trigger_holdoff, set_cmd=self._scope.trigger_holdoff, label="Hold off Time Inbetween Acquiring Triggers", ) def arm( self, sync=True, num_records: int = None, averager_weight: int = None ) -> None: """Prepare the scope for recording. This method tells the scope module to be ready to acquire data and resets the scope module's progress to 0.0. Optionally, the *number of records* and *averager weight* can be set when specified as keyword argument. If it is not specified, it is not changed. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after preparing scope (default: True). num_records (int): The number of scope records to acquire (default: None). averager_weight (int): Averager weight parameter. Averaging is disabled if it is set to 1. For values greater than 1, the scope record shots are averaged using an exponentially weighted moving average (default: None). """ self._scope.arm( sync=sync, num_records=num_records, averager_weight=averager_weight ) def run(self, sync=True) -> None: """Run the scope recording. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after enabling the scope (default: True). """ self._scope.run(sync=sync) def arm_and_run(self, num_records: int = None, averager_weight: int = None) -> None: """Arm the scope and start recording Simply combines the methods arm and run. A synchronisation is performed between the device and the data server after preparing scope. Arguments: num_records (int): The number of scope records to acquire (default: None). averager_weight (int): Averager weight parameter. Averaging is disabled if it is set to 1. For values greater than 1, the scope record shots are averaged using an exponentially weighted moving average (default: None). """ self._scope.arm_and_run( num_records=num_records, averager_weight=averager_weight ) def stop(self, sync=True) -> None: """Stops the scope recording. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after disabling the scope (default: True). """ self._scope.stop(sync=sync) def wait_done(self, timeout: float = 10, sleep_time: float = 0.005) -> None: """Wait until the Scope recording is finished. Arguments: timeout (float): The maximum waiting time in seconds for the Scope (default: 10). sleep_time (float): Time in seconds to wait between requesting the progress and records values Raises: TimeoutError: If the Scope recording is not done before the timeout. """ self._scope.wait_done(timeout=timeout, sleep_time=sleep_time) def read( self, channel=None, blocking: bool = True, timeout: float = 10, sleep_time: float = 0.005, ): """Read out the recorded data from the specified channel of the scope. Arguments: channel (int): The scope channel to read the data from. If no channel is specified, the method will return the data for all channels (default: None). blocking (bool): A flag that specifies if the program should be blocked until the Scope Module has received and processed the desired number of records (default: True). timeout (float): The maximum waiting time in seconds for the Scope (default: 10). sleep_time (float): Time in seconds to wait between requesting the progress and records values Raises: TimeoutError: If the Scope recording is not done before the timeout. 
Returns: A dictionary showing the recorded data and scope time. """ return self._scope.read( channel=channel, blocking=blocking, timeout=timeout, sleep_time=sleep_time ) def channels(self, value=None): """Set all Scope channels simultaneously. Arguments: value (tuple): Tuple of values {'on', 'off'} for channel 1 and 2 (default: None). Returns: A tuple with the states {'on', 'off'} for all input channels. """ return self._scope.channels(value=value) def mode(self, value=None): """Set or get scope data processing mode. Arguments: value (str): Can be either "time" or "FFT" (default: None). Returns: If no argument is given the method returns the current scope data processing mode. """ return self._scope.mode(value=value) def num_records(self, value=None): """Set or get the number of scope records to acquire. Arguments: value (int): The number of scope records to acquire (default: None). Returns: If no argument is given the method returns the current number of scope records to acquire. """ return self._scope.num_records(value=value) def averager_weight(self, value=None): """Set or get the averager weight parameter. Arguments: value (int): Averager weight parameter. Averaging is disabled if it is set to 1. For values greater than 1, the scope record shots are averaged using an exponentially weighted moving average (default: None). Returns: If no argument is given the method returns the current scope data processing mode. """ return self._scope.averager_weight(value=value) @property def is_running(self): return self._scope.is_running class UHFQA(ZIBaseInstrument): """QCoDeS driver for the *Zurich Instruments UHFQA*. Inherits from :class:`ZIBaseInstrument`. Initializes some *submodules* from the device's nodetree and a device-specific *AWG Core*. It also features a :class:`ChannelList` of ten *Readout Channels* (:class:`Channel`). Arguments: name (str): The internal QCoDeS name of the instrument. serial (str): The device serial number, e.g. *'dev1234'*. Keyword Arguments: interface (str): The interface used to connect to the device. (default: '1gbe') host (str): Address of the data server. (default: 'localhost') port (int): Port used to connect to the data server. (default: 8004) api (int): Api level used for the data server. (default: 6) Attributes: awg (:class:`zhinst.qcodes.uhfqa.AWG`): A *UHFQA* specific *AWG Core*. channels (:class:`ChannelList`): A list of ten *Readout Channels* (:class:`zhinst.qcodes.uhfqa.Channel`). """ def __init__( self, name: str, serial: str, interface: str = "1gbe", host: str = "localhost", port: int = 8004, api: int = 6, **kwargs, ) -> None: super().__init__(name, "uhfqa", serial, interface, host, port, api, **kwargs) submodules = self.nodetree_dict.keys() blacklist = ["awgs", "scopes"] [self._init_submodule(key) for key in submodules if key not in blacklist] def _connect(self) -> None: """Connects the device to the data server. Instantiates the device controller from :mod:`zhinst-toolkit`, sets up the data server and connects the device the data server. This method is called from `__init__` of the :class:`BaseInstrument` class. 
""" self._controller = tk.UHFQA( self._name, self._serial, interface=self._interface, host=self._host, port=self._port, api=self._api, ) self._controller.setup() self._controller.connect_device() self.connect_message() self.nodetree_dict = self._controller.nodetree._nodetree_dict self._init_readout_channels() self._init_awg_channels() self._init_scope() self._add_qcodes_params() def _init_readout_channels(self): # init submodules for ReadoutChannels channel_list = ChannelList(self, "channels", Channel) for i in range(10): channel_list.append(Channel(f"ch-{i}", i, self, self._controller)) channel_list.lock() self.add_submodule("channels", channel_list) def _init_awg_channels(self): # init submodule AWG self.add_submodule("awg", AWG("awg", self, self._controller)) def _init_scope(self): # init submodule Scope self.add_submodule("scope", Scope("scope", self, self._controller)) def _add_qcodes_params(self): # add custom parameters as QCoDeS parameters super()._add_qcodes_params() self.add_parameter( "crosstalk_matrix", docstring="The 10x10 crosstalk suppression matrix that multiplies the 10 " "signal paths. Can be set only partially.", get_cmd=self._controller.crosstalk_matrix, set_cmd=self._controller.crosstalk_matrix, label="Crosstalk Matrix", ) sources = [ "Crosstalk", "Integration", "Threshold", "Crosstalk Correlation", "Threshold Correlation", "Rotation", ] self.add_parameter( "result_source", docstring=f"The signal source for QA Results. Has to be one of {sources}.", get_cmd=self._controller.result_source, set_cmd=self._controller.result_source, label="Result Source", vals=vals.Enum(*sources), ) self.add_parameter( "integration_time", unit=self._controller.integration_time._unit, docstring=self._controller.integration_time.__repr__(), get_cmd=self._controller.integration_time, set_cmd=self._controller.integration_time, label="Integration Time", ) self.add_parameter( "integration_length", unit=self._controller.integration_length._unit, docstring=self._controller.integration_length.__repr__(), get_cmd=self._controller.integration_length, set_cmd=self._controller.integration_length, label="Integration Length", ) self.add_parameter( "averaging_mode", docstring=self._controller.averaging_mode._description, get_cmd=self._controller.averaging_mode, set_cmd=self._controller.averaging_mode, label="Averaging Mode", vals=vals.Enum("Cyclic", "Sequential"), ) self.add_parameter( "qa_delay", docstring="The adjustment in the the quantum analyzer delay " "in units of samples.", get_cmd=self._controller.qa_delay, set_cmd=self._controller.qa_delay, label="Quantum Analyzer Delay", vals=vals.Numbers(), ) self.add_parameter( "ref_clock", unit=self._controller.ref_clock._unit, docstring=self._controller.ref_clock.__repr__(), get_cmd=self._controller.ref_clock, set_cmd=self._controller.ref_clock, label="Intended Reference Clock Source", ) def factory_reset(self, sync=True) -> None: """Load the factory default settings. Arguments: sync (bool): A flag that specifies if a synchronisation should be performed between the device and the data server after loading the factory preset (default: True). """ self._controller.factory_reset(sync=sync) def arm(self, length=None, averages=None) -> None: """Prepare UHFQA for result acquisition. This method enables the QA Results Acquisition and resets the acquired points. Optionally, the *result length* and *result averages* can be set when specified as keyword arguments. If they are not specified,they are not changed. 
Arguments: length (int): If specified, the length of the result vector will be set before arming the UHFQA readout (default: None). averages (int): If specified, the result averages will be set before arming the UHFQA readout (default: None). """ self._controller.arm(length=length, averages=averages) def enable_readout_channels(self, channels: List = range(10)) -> None: """Enable weighted integration on the specified readout channels. Arguments: channels (list): A list of indices of channels to enable. (default: range(10)) Raises: ValueError: If the channel list contains an element outside the allowed range. """ self._controller.enable_readout_channels(channels=channels) def disable_readout_channels(self, channels: List = range(10)) -> None: """Disable weighted integration on the specified readout channels. Arguments: channels (list): A list of indices of channels to disable. (default: range(10)) Raises: ValueError: If the channel list contains an element outside the allowed range. """ self._controller.disable_readout_channels(channels=channels) def enable_qccs_mode(self) -> None: """Configure the instrument to work with PQSC. This method sets the reference clock source and DIO settings correctly to connect the instrument to the PQSC. """ self._controller.enable_qccs_mode() def enable_manual_mode(self) -> None: """Disconnect from the PQSC. This method sets the reference clock source and DIO settings to factory default states and the instrument is disconnected from the PQSC. """ self._controller.enable_manual_mode() @property def allowed_sequences(self): return self._controller.allowed_sequences @property def allowed_trigger_modes(self): return self._controller.allowed_trigger_modes ```
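The QCoDeS drivers in this family (SHFQA, SHFSG, UHFQA) all follow the same pattern: a toolkit controller wrapped in submodules and QCoDeS parameters. As a rough illustration of how the UHFQA driver defined above might be used, the sketch below configures one readout channel, programs a 'Readout' sequence on the AWG core and fetches the result vector. The device serial, data-server address and every numeric value not taken from the docstrings above are placeholders, and the `zhinst.qcodes` import path is assumed from the package layout; treat this as a sketch, not a verified recipe. A similarly hedged sketch for the SHFSG driver follows it.

```python
# Hedged usage sketch for the UHFQA driver above. Serial, host and settings are
# placeholders; the import path is assumed from the package layout.
from zhinst.qcodes import UHFQA

uhfqa = UHFQA("uhfqa", "dev2000", host="localhost", port=8004)

# Configure readout channel 0 (values follow the docstring example above).
ch = uhfqa.channels[0]
ch.enable()                       # weighted integration at the readout frequency
ch.readout_frequency(85.6e6)      # Hz
ch.rotation(123.4)                # degrees
ch.threshold(-56.78)
uhfqa.result_source("Threshold")  # state discrimination via the thresholding unit

# Program the AWG core with a 'Readout' sequence and compile it.
uhfqa.awg.set_sequence_params(
    sequence_type="Readout",
    trigger_mode="Send Trigger",
    repetitions=1000,
    period=10e-6,
)
uhfqa.awg.compile()

# Arm the result unit, run the sequence and read back the result vector.
uhfqa.arm(length=1000, averages=1)
uhfqa.awg.run(sync=True)
uhfqa.awg.wait_done(timeout=10)
print(ch.result())                # 1D numpy array of readout results
```

For the SHFSG driver the usage pattern is analogous; again, the serial number and all settings below are illustrative placeholders.

```python
# Hedged usage sketch for the SHFSG driver above; serial and values are placeholders.
from zhinst.qcodes import SHFSG

shfsg = SHFSG("shfsg", "dev12000", host="localhost", port=8004)

sg = shfsg.sgchannels[0]
sg.output("on")
sg.output_range(0)                # dBm, placeholder
sg.rf_center_freq(1e9)            # Hz, placeholder

sg.awg.enable_iq_modulation()     # oscillator + sine generators modulate the AWG outputs
sg.awg.set_sequence_params(
    sequence_type="Simple",
    trigger_mode="Send Trigger",
    repetitions=100,
)
sg.awg.queue_waveform([0.5] * 1024, [])  # empty list uploads zeros for channel 2
sg.awg.compile_and_upload_waveforms()
sg.awg.run()
sg.awg.wait_done()
```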
{ "source": "jensihnow/connected-drink-dispenser-workshop", "score": 3 }
#### File: lambda_functions/cog_post_confirm/lambda.py ```python import os import json import logging import boto3 from boto3.dynamodb.conditions import Key, Attr __copyright__ = ( "Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved." ) __license__ = "MIT-0" logger = logging.getLogger() logger.setLevel(logging.INFO) cog_client = boto3.client("cognito-idp") ddb_client = boto3.resource("dynamodb") def set_dispenser_attrib(username, user_pool_id, dispenser_id): """Add dispenserId attribute to the user entry""" cog_client.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "custom:dispenserId", "Value": dispenser_id}], ) def set_group_attrib(username, user_pool_id, group): """Add custom:group attribute to the user entry""" cog_client.admin_update_user_attributes( UserPoolId=user_pool_id, Username=username, UserAttributes=[{"Name": "custom:group", "Value": group}], ) def handler(event, context): """Main entry into function""" logger.info("Received event: %s", json.dumps(event)) logger.info("Received environ: %s", os.environ) # Create new user entry with username username = event["userName"] table = ddb_client.Table(os.environ["USER_TABLE"]) # Get tracking record details (next dispenser) response = table.query(KeyConditionExpression=Key("userName").eq("admin")) # If first run (CloudFormation deploy or delete of UseTable records), # create tracking record and then the user - Completed during deployment of stack if len(response["Items"]) == 0: # First time, create the tracking record logging.info("No tracking record found in UserTable, creating admin user") response = table.put_item(Item={"userName": "admin", "nextDispenserId": "100"}) # Create the user *if* it isn't the admin user if username != "admin": response = table.put_item( Item={"userName": username, "dispenserId": "100", "assets": None} ) set_dispenser_attrib( username=username, user_pool_id=event["userPoolId"], dispenser_id="100" ) set_group_attrib( username=username, user_pool_id=event["userPoolId"], group="user" ) else: # Admin record was deleted, reset response = table.put_item( Item={"userName": username, "nextDispenserId": "100"} ) else: # Create user with next dispenser disp_id = response["Items"][0]["nextDispenserId"] response = table.put_item( Item={"userName": "admin", "nextDispenserId": f"{int(disp_id)+1}"} ) response = table.put_item( Item={"userName": username, "dispenserId": f"{disp_id}", "assets": None} ) set_dispenser_attrib( username=username, user_pool_id=event["userPoolId"], dispenser_id=disp_id ) set_group_attrib( username=username, user_pool_id=event["userPoolId"], group="user" ) # No additional information is expected in the response, return event untouched return event ``` #### File: lambda_functions/cr_delete_participant_users/index.py ```python import os import json import logging as log import boto3 from botocore.exceptions import ClientError import cfnresponse from boto3.dynamodb.conditions import Key, Attr __copyright__ = ( "Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved." 
) __license__ = "MIT-0" log.getLogger().setLevel(log.INFO) ddb_client = boto3.resource("dynamodb") lambda_client = boto3.client("lambda") def delete_participant_users(user_table): """Read each non-admin userName and invoke lambda to delete the user, returns total delete participant users""" table = ddb_client.Table(user_table) delete_function_arn = os.environ["DELETE_USER_LAMBDA_FUNCTION"] # Set event base to pass delete checks event = { "requestContext": {"authorizer": {"claims": {"custom:group": "admin"}}}, "body": "", } # First call and process deleted_users = 0 response = table.scan(ProjectionExpression="userName") for i in response["Items"]: # Invoke Lambda with username to delete if i["userName"] != "admin": print(f"deleting user {i['userName']}") event["body"] = json.dumps({"username": i["userName"]}) lambda_client.invoke( FunctionName=delete_function_arn, Payload=json.dumps(event) ) print(f"user {i['userName']} successfully deleted") deleted_users += 1 while "LastEvaluatedKey" in response: response = table.scan( ProjectionExpression="userName", Limit=1, ExclusiveStartKey=response["LastEvaluatedKey"], ) for i in response["Items"]: # Invoke Lambda with username to delete if i["userName"] != "admin": print(f"deleting user {i['userName']}") event["body"] = json.dumps({"username": i["userName"]}) lambda_client.invoke( FunctionName=delete_function_arn, Payload=json.dumps(event) ) deleted_users += 1 return deleted_users def main(event, context): # This needs to change if there are to be multiple resources # in the same stack physical_id = "DeleteParticipantUsers" try: log.info("Input event: %s", event) log.info("Environment variables: %s", os.environ) # Check if this is a Create and we're failing Creates if event["RequestType"] == "Create" and event["ResourceProperties"].get( "FailCreate", False ): raise RuntimeError("Create failure requested") elif event["RequestType"] == "Create": # No operation required for create, only act on delete attributes = { "Response": f"{physical_id} CREATE performed, no actions taken" } elif event["RequestType"] == "Update": # No operation required for update, only act on delete attributes = { "Response": f"{physical_id} UPDATE performed, no actions taken" } else: # delete all users and return response = delete_participant_users(os.environ["USER_TABLE"]) attributes = { "Response": f"{physical_id} DELETE performed, {response} users deleted" } cfnresponse.send(event, context, cfnresponse.SUCCESS, attributes, physical_id) except Exception as e: log.exception(e) # cfnresponse's error message is always "see CloudWatch" cfnresponse.send(event, context, cfnresponse.FAILED, {}, physical_id) ``` #### File: tests/bleTestsScripts/startTests_integration.py ```python import Queue import sys import os import threading import securityAgent import testutils import time from testClass import runTest from bleAdapter import bleAdapter def main(): scan_filter = dict() bleAdapter.init() agent = securityAgent.createSecurityAgent() # default DUT_name: nimble(without set_property) # TODO: check DUT with MAC address instead of name. scan_filter.update({ "UUIDs": [runTest.DUT_UUID_128]}) # default DUT_name: nimble(without set_property) # TODO: check DUT with MAC address instead of name. DUT_ORIGINAL_NAME = runTest.DUT_NAME runTest.DUT_NAME = "nimb" bleAdapter.setDiscoveryFilter(scan_filter) # Advertisement interval consistent after reset test # The first time uses different callback to get/check test device information. Use the second time and third time KPI to compare. 
# First time connection isTestSuccessFull = True bleAdapter.startDiscovery(runTest.discoveryEventCb) # firstStartScan = time.time() runTest.mainloop.run() # firstKPI = time.time() - firstStartScan runTest.submitTestResult(isTestSuccessFull, runTest.advertisement) bleAdapter.stopDiscovery() testDevice = runTest.getTestDevice() isTestSuccessFull = bleAdapter.connect(testDevice) runTest.submitTestResult(isTestSuccessFull, runTest.simpleConnection) time.sleep(2) #wait for connection parameters update isTestSuccessFull &= bleAdapter.disconnect() # Second time connection bleAdapter.startDiscovery(runTest.discoveryStartedCb)#wait for DUT to start advertising secondStartScan = time.time() runTest.mainloop.run() secondKPI = time.time() - secondStartScan bleAdapter.stopDiscovery() isConnectSuccessFull = bleAdapter.connect(testDevice) isTestSuccessFull &= isConnectSuccessFull runTest.submitTestResult(isTestSuccessFull, runTest.reConnection) time.sleep(2) #wait for connection parameters update # Second time disconnect isTestSuccessFull &= bleAdapter.disconnect() #Third time connection bleAdapter.startDiscovery(runTest.discoveryStartedCb)#wait for DUT to start advertising thirdStartScan = time.time() runTest.mainloop.run() thirdKPI = time.time() - thirdStartScan bleAdapter.stopDiscovery() isConnectSuccessFull = bleAdapter.connect(testDevice) isTestSuccessFull &= isConnectSuccessFull runTest.submitTestResult(isTestSuccessFull, runTest.reConnection) if thirdKPI > secondKPI * 10: isTestSuccessFull &= false # write result back to server isTestSuccessFull = runTest.discoverPrimaryServices() bleAdapter.gatt.updateLocalAttributeTable() isTestSuccessFull &= runTest.writeResultWithoutResponse(chr(isTestSuccessFull + 48)) runTest.submitTestResult(isTestSuccessFull, runTest.writeWithoutResponse) # Data size > MTU - 3 send notification test bleAdapter.setNotificationCallBack(runTest.notificationMTUCb) bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID) #subscribe for next test isTestSuccessFull = True runTest.mainloop.run() isTestSuccessFull = runTest.isNotificationDeclinedSuccessFull runTest.submitTestResult(isTestSuccessFull, runTest.notification) isTestSuccessFull = bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID, subscribe = False) #unsubscribe runTest.submitTestResult(isTestSuccessFull, runTest.removeNotification) isTestSuccessFull &= bleAdapter.disconnect() testutils.removeBondedDevices() runTest.DUT_NAME = DUT_ORIGINAL_NAME isTestSuccessFull = runTest.Advertise_With_16bit_ServiceUUID(scan_filter=scan_filter, bleAdapter=bleAdapter) runTest.submitTestResult(isTestSuccessFull, runTest.Advertise_With_16bit_ServiceUUID) isTestSuccessFull = runTest.Advertise_With_Manufacture_Data(scan_filter=scan_filter, bleAdapter=bleAdapter) runTest.submitTestResult(isTestSuccessFull, runTest.Advertise_With_Manufacture_Data) scan_filter.update({ "UUIDs": [runTest.DUT_UUID_128]}) # default DUT_name: nimble(without set_property) bleAdapter.setDiscoveryFilter(scan_filter) # First time connection isTestSuccessFull = True bleAdapter.startDiscovery(runTest.discoveryEventCb) runTest.mainloop.run() runTest.submitTestResult(isTestSuccessFull, runTest.advertisement) bleAdapter.stopDiscovery() testDevice = runTest.getTestDevice() isTestSuccessFull = bleAdapter.connect(testDevice) runTest.submitTestResult(isTestSuccessFull, runTest.simpleConnection) isTestSuccessFull = runTest.discoverPrimaryServices() runTest.submitTestResult(isTestSuccessFull, runTest.discoverPrimaryServices) 
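    # Next: mirror the discovered GATT attribute table locally, confirm that
    # advertising stopped after service discovery, then exercise the write/read,
    # notification and indication paths, including after a reconnection.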
bleAdapter.gatt.updateLocalAttributeTable() # Check device not present. After discovery of services, advertisement should have stopped. runTest.stopAdvertisement(scan_filter) # Check write and read bleAdapter.writeCharacteristic(runTest.DUT_OPEN_CHAR_UUID, runTest.DUT_OPEN_DESCR_UUID) bleAdapter.readCharacteristic(runTest.DUT_OPEN_CHAR_UUID) # Enable and receive notification and indication then disable. bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID) bleAdapter.subscribeForNotification(runTest.DUT_INDICATE_CHAR_UUID) #subscribe for next test time.sleep(2) #wait for connection parameters update # Check Notification and Indication bleAdapter.setNotificationCallBack(runTest.notificationCb) isTestSuccessFull = True runTest.mainloop.run() runTest.submitTestResult(isTestSuccessFull, runTest.notification) bleAdapter.setNotificationCallBack(runTest.indicationCb) isTestSuccessFull = True runTest.mainloop.run() runTest.submitTestResult(isTestSuccessFull, runTest.indication) isTestSuccessFull &= bleAdapter.disconnect() # Second time connection bleAdapter.startDiscovery(runTest.discoveryStartedCb)#wait for DUT to start advertising runTest.mainloop.run() bleAdapter.stopDiscovery() isConnectSuccessFull = bleAdapter.connect(testDevice) isTestSuccessFull &= isConnectSuccessFull runTest.submitTestResult(isTestSuccessFull, runTest.reConnection) bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID) bleAdapter.subscribeForNotification(runTest.DUT_INDICATE_CHAR_UUID) #subscribe for next test # Check write and read after reconnection bleAdapter.writeCharacteristic(runTest.DUT_OPEN_CHAR_UUID, runTest.DUT_OPEN_DESCR_UUID) bleAdapter.readCharacteristic(runTest.DUT_OPEN_CHAR_UUID) # Check Notification and Indication after reconnection bleAdapter.setNotificationCallBack(runTest.notificationCb) isTestSuccessFull = True runTest.mainloop.run() runTest.submitTestResult(isTestSuccessFull, runTest.notification) bleAdapter.setNotificationCallBack(runTest.indicationCb) isTestSuccessFull = True runTest.mainloop.run() runTest.submitTestResult(isTestSuccessFull, runTest.indication) isTestSuccessFull = bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID, subscribe = False) #unsubscribe isTestSuccessFull = True runTest.submitTestResult(isTestSuccessFull, runTest.removeNotification) isTestSuccessFull = bleAdapter.subscribeForNotification(runTest.DUT_INDICATE_CHAR_UUID, subscribe = False) #unsubscribe isTestSuccessFull = True runTest.submitTestResult(isTestSuccessFull, runTest.removeIndication) isTestSuccessFull &= bleAdapter.disconnect() testutils.removeBondedDevices() time.sleep(2) #wait for connection parameters update runTest.submitTestResult(isTestSuccessFull, runTest.disconnect) runTest.printTestsSummary() ``` #### File: tests/bleTestsScripts/testClass.py ```python import sys import bleAdapter from bleAdapter import bleAdapter import time import testutils import dbus.mainloop.glib try: from gi.repository import GObject except ImportError: import gobject as GObject class runTest: mainloop = GObject.MainLoop() DUT_GENERIC_STRING = "hello" DUT_FAIL_STRING = "fail" DUT_OPEN_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320002" DUT_OPEN_DESCR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320008" DUT_WRITE_NO_RESP_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320005" DUT_NOTIFY_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320006" DUT_INDICATE_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320007" DUT_ENCRYPT_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320003" DUT_ENCRYPT_DESCR_UUID = 
"8a7f1168-48af-4efb-83b5-e679f9320009" DUT_UUID_128 = "8a7f1168-48af-4efb-83b5-e679f932db5a" DUT_UUID_16 = "abcd" DUT_SERVICEB_UUID = "8a7f1168-48af-4efb-83b5-e679f9320001" DUT_CHAR= {"8a7f1168-48af-4efb-83b5-e679f9320002": {"Flags":"read, write"}, "8a7f1168-48af-4efb-83b5-e679f9320003": {"Flags":"read, write"}, "8a7f1168-48af-4efb-83b5-e679f9320004": {"Flags":"read, write"}, "8a7f1168-48af-4efb-83b5-e679f9320005": {"Flags":"write-without-response"}, "8a7f1168-48af-4efb-83b5-e679f9320006": {"Flags":"notify"}, "8a7f1168-48af-4efb-83b5-e679f9320007": {"Flags":"indicate"}} DUT_DESCR = {"8a7f1168-48af-4efb-83b5-e679f9320008": None, "8a7f1168-48af-4efb-83b5-e679f9320009": None, "8a7f1168-48af-4efb-83b5-e679f932000a": None, "8a7f1168-48af-4efb-83b5-e679f932000b": None } DUT_NAME = "TEST" TEST_GROUP = "Full_BLE" TEST_NAME_PREFIX = "RaspberryPI" SHORT_LOCAL_NAME_SIZE = 4 ADVERTISEMENT_TEST_TIMEOUT = 120 STOP_ADVERTISEMENT_TEST_TIMEOUT = 2000 #2 seconds SIMPLE_CONNECTION_TEST_TIMEOUT = 120 SERVICE_DISCOVERY_TEST_TIMEOUT = 120 PAIRING_TEST_TIMEOUT = 120 GENERIC_TEST_TIMEOUT = 120 MTU_SIZE = 200 numberOfTests = 0 numberOfFailedTests = 0 # Manufacturer-specific Data # First two bytes are company ID (randomly select Espressif(741) for test purpose) # Next bytes are defined by the company (randomly select unit8_t 5 for test purpose) COMPANY_ID = 741 MANU_DATA = 5 testDevice = [] DUT_MTU_2_STRING = "a" * (MTU_SIZE - 3) DUT_LONG_STRING = ["A" * (MTU_SIZE - 3), "B" * (MTU_SIZE - 3), "C" * (MTU_SIZE - 3)] DUT_CHAR_E_STRING = "E" DUT_CHAR_F_STRING = "F" isNotificationDeclinedSuccessFull = False testResult = False @staticmethod def discoveryStoppedCb(testDevice = None): global testResult testResult = False if testDevice == None: testResult = True runTest.mainloop.quit() @staticmethod def discoveryStartedCb(testDevice): runTest.mainloop.quit() @staticmethod def discoveryEventCb(testDevice): isTestSuccessFull = runTest.advertisement(testDevice) if isTestSuccessFull == True: runTest.setTestDevice(testDevice) #discoveryEvent.set() runTest.mainloop.quit() @staticmethod def discoveryEventCb_16bit(testDevice): isTestSuccessFull = runTest.advertisement_16bit(testDevice) if isTestSuccessFull == True: runTest.setTestDevice(testDevice) #discoveryEvent.set() runTest.mainloop.quit() @staticmethod def notificationCb(uuid, value, flag): isNotificationTestSuccessFull = runTest.notification(uuid, value, flag) if isNotificationTestSuccessFull == True: #notificationEvent.set() runTest.mainloop.quit() @staticmethod def indicationCb(uuid, value, flag): isIndicationTestSuccessFull = runTest.indication(uuid, value, flag) if isIndicationTestSuccessFull == True: #indicationEvent.set() runTest.mainloop.quit() @staticmethod def notificationMTUCb(uuid, value, flag): notification = runTest.notificationMTU2(uuid, value, flag) if notification == runTest.DUT_FAIL_STRING: runTest.mainloop.quit() runTest.isNotificationDeclinedSuccessFull = True if notification == runTest.DUT_MTU_2_STRING: runTest.mainloop.quit() runTest.isNotificationDeclinedSuccessFull = False @staticmethod def errorConnectCb(): print("Connection error") sys.stdout.flush() connectEvent.put(0) @staticmethod def stopAdvertisement(scan_filter): #Do one cycle of discovery to remove cached messages. 
timerHandle = GObject.timeout_add(runTest.STOP_ADVERTISEMENT_TEST_TIMEOUT, runTest.discoveryStoppedCb) bleAdapter.setDiscoveryFilter(scan_filter) bleAdapter.startDiscovery(runTest.discoveryStoppedCb)#wait for DUT to start advertising runTest.mainloop.run() bleAdapter.stopDiscovery() #All cached message have been remove. Try again a discovery. timerHandle = GObject.timeout_add(runTest.STOP_ADVERTISEMENT_TEST_TIMEOUT, runTest.discoveryStoppedCb) bleAdapter.setDiscoveryFilter(scan_filter) bleAdapter.startDiscovery(runTest.discoveryStoppedCb)#wait for DUT to start advertising runTest.mainloop.run() runTest.submitTestResult(testResult, runTest.stopAdvertisement) bleAdapter.stopDiscovery() @staticmethod def reconnectWhileNotBonded(): isTestSuccessFull = bleAdapter.connect(runTest.testDevice) if isTestSuccessFull == False: print("reconnectWhileNotBonded test: Could not connect") sys.stdout.flush() runTest.submitTestResult(False, runTest.reconnectWhileNotBonded) return #Since secure connection only are accepted, pairing in "just works" shoud get rejected if bleAdapter.pair() == True: print("reconnectWhileNotBonded test: Able to pair in just Works mode") sys.stdout.flush() runTest.submitTestResult(False, runTest.reconnectWhileNotBonded) return runTest.submitTestResult(True, runTest.reconnectWhileNotBonded) @staticmethod def reconnectWhileBonded(): isTestSuccessFull = bleAdapter.connect(runTest.testDevice) #since there is a bond with DUT, pairing is automatic if( isTestSuccessFull == True): isTestSuccessfull = bleAdapter.writeCharacteristic(runTest.DUT_ENCRYPT_CHAR_UUID, runTest.DUT_ENCRYPT_CHAR_UUID) runTest.submitTestResult(isTestSuccessFull, runTest.reconnectWhileBonded) @staticmethod def disconnect(): isTestSuccessFull = bleAdapter.disconnect() runTest.submitTestResult(isTestSuccessFull, runTest.disconnect) @staticmethod def pairing(): isTestSuccessFull = True if bleAdapter.isPaired() == False: bleAdapter.writeCharacteristic(runTest.DUT_ENCRYPT_CHAR_UUID, runTest.DUT_ENCRYPT_CHAR_UUID) #should trigger a pairing event isTestSuccessFull = bleAdapter.isPaired() else: isTestSuccessFull = False return isTestSuccessFull @staticmethod def _readWriteProtectedAttributes(pairingStatus): if pairingStatus == True: expectedSuccess = True else: expectedSuccess = False isTestSuccessfull = bleAdapter.writeDescriptor(runTest.DUT_ENCRYPT_DESCR_UUID, runTest.DUT_ENCRYPT_DESCR_UUID) if isTestSuccessfull != expectedSuccess: print("readWriteProtectedAttributes test: Error while reading protect descriptor, pairing status was "+ str(pairingStatus) + " Operation success was " + str(isTestSuccessfull)) sys.stdout.flush() return False isTestSuccessfull = bleAdapter.writeCharacteristic(runTest.DUT_ENCRYPT_CHAR_UUID, runTest.DUT_ENCRYPT_CHAR_UUID) if isTestSuccessfull != expectedSuccess: print("readWriteProtectedAttributes test: Error while writing protect characteristic, pairing status was "+ str(pairingStatus) + " Operation success was " + str(isTestSuccessfull)) sys.stdout.flush() return False return True # Expect writing/Reading to protect attribute to succeed. @staticmethod def readWriteProtectedAttributesWhilePaired(): isPaired = bleAdapter.isPaired() if isPaired != True: print("readWriteProtectedCharacteristicWhileNotPaired test: Expected paired:1, got:"+str(isPaired)) sys.stdout.flush() return False return runTest._readWriteProtectedAttributes(True) # Expect writing/Reading to protect attribute to fail. 
@staticmethod def readWriteProtectedAttributesWhileNotPaired(): isPaired = bleAdapter.isPaired() if isPaired != False: print("readWriteProtectedCharacteristicWhileNotPaired test: Expected paired:0, got:"+str(isPaired)) sys.stdout.flush() return False return runTest._readWriteProtectedAttributes(False) @staticmethod def indication(uuid, value, flag): isSuccessfull = False if (uuid == runTest.DUT_INDICATE_CHAR_UUID) and (value == runTest.DUT_GENERIC_STRING) and (flag == "indicate"): isSuccessfull = True return isSuccessfull @staticmethod def notification(uuid, value, flag): isSuccessfull = False if (uuid == runTest.DUT_NOTIFY_CHAR_UUID) and (value == runTest.DUT_GENERIC_STRING) and (flag == "notify"): isSuccessfull = True return isSuccessfull @staticmethod def notificationMTU2(uuid, value, flag): if (uuid == runTest.DUT_NOTIFY_CHAR_UUID) and (flag == "notify"): return value @staticmethod def notificationOnCharE(uuid, value, flag): isSuccessfull = False if (uuid == runTest.DUT_NOTIFY_CHAR_UUID) and (value == runTest.DUT_CHAR_E_STRING) and (flag == "notify"): isSuccessfull = True return isSuccessfull @staticmethod def indicationOnCharF(uuid, value, flag): isSuccessfull = False if (uuid == runTest.DUT_INDICATE_CHAR_UUID) and (value == runTest.DUT_CHAR_F_STRING) and (flag == "indicate"): isSuccessfull = True return isSuccessfull @staticmethod def writeWithoutResponse(): return bleAdapter.writeCharacteristic(runTest.DUT_WRITE_NO_RESP_CHAR_UUID, runTest.DUT_WRITE_NO_RESP_CHAR_UUID, False) @staticmethod def writeResultWithoutResponse(result): return bleAdapter.writeCharacteristic(runTest.DUT_WRITE_NO_RESP_CHAR_UUID, result, False) @staticmethod def _readWriteChecks(charUUID, descrUUID): bleAdapter.writeCharacteristic(charUUID, charUUID) bleAdapter.writeDescriptor(descrUUID, descrUUID) (isTestSuccessfull, charRead) = bleAdapter.readCharacteristic(charUUID) (isTestSuccessfull, descrRead) = bleAdapter.readDescriptor(descrUUID) if charRead != charUUID: isTestSuccessfull = False print("readWriteSimpleConnection test: Expected char uuid:"+charUUID+" got:"+charRead) if descrRead != descrUUID: isTestSuccessfull = False print("readWriteSimpleConnection test: Expected descr uuid:"+descrUUID+" got:"+descrRead) sys.stdout.flush() return isTestSuccessfull @staticmethod def readWriteSimpleConnection(): isTestSuccessfull = runTest._readWriteChecks(runTest.DUT_OPEN_CHAR_UUID, runTest.DUT_OPEN_DESCR_UUID) isPaired = bleAdapter.isPaired() if isPaired != False: isTestSuccessfull = False print("readWriteSimpleConnection test: Expected paired:0, got:"+str(isPaired)) sys.stdout.flush() return isTestSuccessfull @staticmethod def discoverPrimaryServices(): return bleAdapter.getPropertie(runTest.testDevice, "ServicesResolved") @staticmethod def checkProperties(gatt): isTestSuccessfull = True for uuid in runTest.DUT_CHAR.keys(): if runTest.DUT_CHAR[uuid]["Flags"] != gatt.characteristics[uuid]["Flags"]: print("checkProperties test: incorrect flags, expected: "+runTest.DUT_CHAR[uuid]["Flags"]+" was: "+gatt.characteristics[uuid]["Flags"]) isTestSuccessfull = False sys.stdout.flush() return isTestSuccessfull @staticmethod def checkUUIDs(gatt): isTestSuccessfull = True if runTest.DUT_SERVICEB_UUID not in gatt.services.keys(): print("checkUUIDs test: missing service UUID: "+runTest.DUT_SERVICEB_UUID) isTestSuccessfull = False #Check characteristics UUIDs for uuid in runTest.DUT_CHAR.keys(): if uuid not in gatt.characteristics.keys(): print("checkUUIDs test: missing characteristic UUID: "+uuid) isTestSuccessfull = False #Check 
descriptors for uuid in runTest.DUT_DESCR.keys(): if uuid not in gatt.descriptors.keys(): print("checkUUIDs test: missing descriptors UUID: "+uuid) isTestSuccessfull = False sys.stdout.flush() return isTestSuccessfull @staticmethod def simpleConnection(isConnected): return isConnected @staticmethod def reConnection(isConnected): return isConnected @staticmethod def removeIndication(isSuccessfull): return isSuccessfull @staticmethod def removeNotification(isSuccessfull): return isSuccessfull @staticmethod def advertisement(testDevice, DUT_UUID=None): if (DUT_UUID == None): DUT_UUID = runTest.DUT_UUID_128 if (bleAdapter.getPropertie(testDevice, "Address") == None): print("Advertisement test: Waiting for Address") sys.stdout.flush() return False UUIDs = bleAdapter.getPropertie(testDevice, "UUIDs") if (UUIDs == None): print("Advertisement test: Waiting for UUIDs") sys.stdout.flush() return False else: if (DUT_UUID not in UUIDs): print("Advertisement test: Waiting for device UUID") sys.stdout.flush() return False #Remove test for service B. Advertisement messages were too small. #Should look into improving this part if it can be done. #if (runTest.DUT_SERVICEB_UUID not in UUIDs): # print("Advertisement test: Waiting for serviceB UUID") # sys.stdout.flush() # return False name = bleAdapter.getPropertie(testDevice, "Name") if(name == None): print("Advertisement test: Waiting name") sys.stdout.flush() return False else: #Names can be cached. So the complete local name may still be in memory. Check the 4 first letter which constitutes the short name if (runTest.DUT_NAME != name[:runTest.SHORT_LOCAL_NAME_SIZE]): print("Advertisement test: name is incorrect: " + name) sys.stdout.flush() # return False if (bleAdapter.getPropertie(testDevice, "TxPower") == None): print("Advertisement test: Waiting for TxPower") sys.stdout.flush() return False if( bleAdapter.getPropertie(testDevice, "RSSI") == None): print("Advertisement test: Waiting for RSSI") sys.stdout.flush() return False return True @staticmethod def get_manufacture_data(testDevice, DUT_UUID=None): manufacture_data_dict = bleAdapter.getPropertie(testDevice, "ManufacturerData") # If manufacture data doesn't exist, return None if( manufacture_data_dict == None ): print("No Manufacture Data") sys.stdout.flush() return None # If manufacture data exists, return manufacture data else: print( "Manufacturer Specific Data: " + str(manufacture_data_dict.items()) ) sys.stdout.flush() manufacture_data = manufacture_data_dict[runTest.COMPANY_ID] return manufacture_data @staticmethod def _advertisement_start(scan_filter, UUID, discoveryEvent_Cb, bleAdapter): scan_filter.update({ "UUIDs": [UUID]}) bleAdapter.setDiscoveryFilter(scan_filter) #Discovery test bleAdapter.startDiscovery(discoveryEvent_Cb) runTest.mainloop.run() bleAdapter.stopDiscovery() @staticmethod def _simple_connect(): #Simple Connection test testDevice = runTest.getTestDevice() isTestSuccessFull = bleAdapter.connect(testDevice) time.sleep(2) #wait for connection parameters update @staticmethod def _advertisement_connection_tests(scan_filter, bleAdapter, UUID, discoveryEvent_Cb): runTest._advertisement_start(scan_filter=scan_filter, UUID=UUID, discoveryEvent_Cb=discoveryEvent_Cb, bleAdapter=bleAdapter) runTest._simple_connect() runTest.stopAdvertisement(scan_filter) bleAdapter.disconnect() testutils.removeBondedDevices() @staticmethod def Advertise_Without_Properties(scan_filter, bleAdapter): DUT_NAME_ORIGINAL = runTest.DUT_NAME runTest.DUT_NAME = "nimb" 
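        # advertisement() matches only the first SHORT_LOCAL_NAME_SIZE characters of the
        # advertised name, so the expected name is shortened to "nimb" here and restored below.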
runTest._advertisement_connection_tests(scan_filter=scan_filter, bleAdapter=bleAdapter, UUID=runTest.DUT_UUID_128, discoveryEvent_Cb=runTest.discoveryEventCb) runTest.DUT_NAME = DUT_NAME_ORIGINAL return True @staticmethod def Advertise_With_Manufacture_Data(scan_filter, bleAdapter): isTestSuccessFull = True # Check when manufacture data length is 0, but pointer is valid runTest._advertisement_start(scan_filter=scan_filter, UUID=runTest.DUT_UUID_128, discoveryEvent_Cb=runTest.discoveryEventCb, bleAdapter=bleAdapter) manufacture_data = runTest.get_manufacture_data(runTest.testDevice) if manufacture_data != None: isTestSuccessFull = False runTest._simple_connect() runTest.stopAdvertisement(scan_filter) isTestSuccessFull &= bleAdapter.disconnect() testutils.removeBondedDevices() # Check when manufacture data pointer is NULL, but length is not 0 runTest._advertisement_start(scan_filter=scan_filter, UUID=runTest.DUT_UUID_128, discoveryEvent_Cb=runTest.discoveryEventCb, bleAdapter=bleAdapter) manufacture_data = runTest.get_manufacture_data(runTest.testDevice) if manufacture_data != None: isTestSuccessFull = False runTest._simple_connect() runTest.stopAdvertisement(scan_filter) isTestSuccessFull &= bleAdapter.disconnect() testutils.removeBondedDevices() # Check when manufacture data length is not 0, and pointer is valid runTest._advertisement_start(scan_filter=scan_filter, UUID=runTest.DUT_UUID_128, discoveryEvent_Cb=runTest.discoveryEventCb, bleAdapter=bleAdapter) manufacture_data = runTest.get_manufacture_data(runTest.testDevice) for data in manufacture_data: if data != runTest.MANU_DATA: isTestSuccessFull = False runTest._simple_connect() runTest.stopAdvertisement(scan_filter) isTestSuccessFull &= bleAdapter.disconnect() testutils.removeBondedDevices() return isTestSuccessFull @staticmethod def Advertise_With_16bit_ServiceUUID(scan_filter, bleAdapter): runTest._advertisement_connection_tests(scan_filter=scan_filter, bleAdapter=bleAdapter, UUID=runTest.DUT_UUID_16, discoveryEvent_Cb=runTest.discoveryEventCb_16bit) return True @staticmethod def Write_Notification_Size_Greater_Than_MTU_3(scan_filter, bleAdapter): runTest._advertisement_connection_tests(scan_filter=scan_filter, bleAdapter=bleAdapter, UUID=runTest.DUT_UUID_128, discoveryEvent_Cb=runTest.discoveryEventCb) runTest._simple_connect() runTest.stopAdvertisement(scan_filter) bleAdapter.gatt.updateLocalAttributeTable() # Data size > MTU - 3 send notification test bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID) #subscribe for next test isTestSuccessFull = True runTest.mainloop.run() isTestSuccessFull &= runTest.isNotificationDeclinedSuccessFull # unsubscribe isTestSuccessFull &= bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID, subscribe = False) #unsubscribe isTestSuccessFull &= bleAdapter.disconnect() testutils.removeBondedDevices() return isTestSuccessFull @staticmethod def advertisement_16bit(testDevice): return runTest.advertisement(testDevice, DUT_UUID=runTest.UUID_16to128(runTest.DUT_UUID_16)) @staticmethod def UUID_16to128(UUID_16bit): return "0000"+UUID_16bit+"-0000-1000-8000-00805f9b34fb" @staticmethod def setTestDevice(testDeviceTmp): runTest.testDevice = testDeviceTmp @staticmethod def getTestDevice(): return runTest.testDevice @staticmethod def submitTestResult(isSuccessfull, testMethod): switch = { runTest.advertisement: "_advertisement", runTest.discoverPrimaryServices: "_discoverPrimaryServices", runTest.simpleConnection: "_simpleConnection", runTest.reConnection: "_reConnection", 
runTest.checkProperties: "_checkProperties", runTest.checkUUIDs: "_checkUUIDs", runTest.readWriteSimpleConnection: "_readWriteSimpleConnection", runTest.writeWithoutResponse: "_writeWithoutResponse", runTest.notification:"_notification", runTest.indication:"_indication", runTest.removeNotification:"_removeNotification", runTest.removeIndication:"_removeIndication", runTest.readWriteProtectedAttributesWhileNotPaired:"_readWriteProtectedAttributesWhileNotPaired", runTest.readWriteProtectedAttributesWhilePaired:"_readWriteProtectedAttributesWhilePaired", runTest.pairing: "_pairing", runTest.disconnect: "_disconnect", runTest.reconnectWhileBonded: "_reconnectWhileBonded", runTest.reconnectWhileNotBonded: "_reconnectWhileNotBonded", runTest.stopAdvertisement: "_stopAdvertisement", runTest.Advertise_Without_Properties: "_Advertise_Without_Properties", runTest.Advertise_With_16bit_ServiceUUID: "_Advertise_With_16bit_ServiceUUID", runTest.Advertise_With_Manufacture_Data: "_Advertise_With_Manufacture_Data", # runTest.Advertise_Interval_Consistent_After_BT_Reset: "_Advertise_Interval_Consistent_After_BT_Reset", runTest.Write_Notification_Size_Greater_Than_MTU_3: "_Write_Notification_Size_Greater_Than_MTU_3" } runTest.numberOfTests += 1 if(isSuccessfull == True): successString = "PASS" else: successString = "FAIL" runTest.numberOfFailedTests += 1 print("TEST("+runTest.TEST_GROUP+", "+runTest.TEST_NAME_PREFIX + switch.get(testMethod, "methodNotFound")+") "+successString) sys.stdout.flush() @staticmethod def printTestsSummary(): print("-----------------------") print(str(runTest.numberOfTests)+ " Tests "+str(runTest.numberOfFailedTests)+" Failures 0 Ignored") sys.stdout.flush() ```
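The `stopAdvertisement` helper above leans on a standard GLib idiom: arm a timeout, run the main loop, and let whichever callback fires first record the outcome and quit the loop. A minimal sketch of that idiom, assuming only that PyGObject is installed (the test code reaches the same functions through its `GObject` import; the names and the simulated discovery event below are illustrative, not part of the test suite):

```python
from gi.repository import GLib

result = {"found": False}
loop = GLib.MainLoop()

def on_device_found():
    # Stand-in for a real BLE discovery callback.
    result["found"] = True
    loop.quit()
    return False  # one-shot source

def on_timeout():
    # Fires only if nothing was discovered within the guard interval.
    loop.quit()
    return False

GLib.timeout_add(2000, on_timeout)  # 2 s guard, like STOP_ADVERTISEMENT_TEST_TIMEOUT
GLib.idle_add(on_device_found)      # simulate an immediate discovery event
loop.run()
print("device seen:", result["found"])
```

Because each callback both records the outcome and quits the loop, `loop.run()` behaves like a blocking wait with a timeout, which is how the tests above wait for the DUT to appear or to stop advertising.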
{ "source": "jensjacobt/randomopgaver", "score": 3 }
#### File: jensjacobt/randomopgaver/randomopgavergui.pyw ```python import os import tkinter from tkinter import * from tkinter import scrolledtext from tkinter import messagebox from tkinter.filedialog import askopenfilename from tkinter.filedialog import askdirectory from filegenerator import create_randomopgaver global doc_file, tab_file, out_dir, st, status # GUI FUNCTIONS def set_doc(): doc_file = askopenfilename( initialdir='', filetypes=(('Alle Word-dokumenter', '*.docx;*.doc'), ('Alle filer', '*.*')), title='Vælg et Word-dokument', multiple=False ) e1.delete(0, END) e1.insert(0, os.path.abspath(doc_file)) def set_tab(): tab_file = askopenfilename( initialdir='', filetypes=(('Alle Excel-ark', '*.xlsx;*.xls'), ('Alle CSV-filer', '*.csv'), ('Alle TXT-filer', '*.txt'), ("Alle filer", "*.*")), title="Vælg et Excel-ark (eller en CSV-fil)", multiple=False ) e2.delete(0, END) e2.insert(0, os.path.abspath(tab_file)) def set_out(): out_dir = askdirectory( initialdir="", title="Vælg en mappe til output" ) e3.delete(0, END) e3.insert(0, os.path.abspath(out_dir)) def generate(): global st, status if not os.path.isfile(e1.get()): error_handler( 'Fejl: Den angivne sti for Word-dokumentet er ikke korrekt.') return if not os.path.isfile(e2.get()): error_handler( 'Fejl: Den angivne sti for Excel-arket/CSV-filen er ikke korrekt.') return out_folder = (e3.get() if e3.get() else os.path.abspath(os.path.dirname(e1.get()))) st = tkinter.scrolledtext.ScrolledText( master=root, wrap=tkinter.WORD, height=15, width=5 ) st.grid(row=4, column=0, columnspan=3, padx=xpad, pady=ypad, sticky=EW) root.grid_columnconfigure(0, weight=1) st.delete(1.0, END) st.insert(1.0, 'Genererering af randomopgaver påbegyndt.') status['text'] = 'Genererer randomopgaver...' st.update() res = create_randomopgaver(write, error_handler, e1.get(), e2.get(), out_folder) if res: status['text'] = 'Randomopgaver blev genereret med succes.' def write(string): global st st.insert(END, '\n' + string) st.see(END) st.update() def error_handler(error_string): global status status['text'] = 'Der opstod en fejl under genereringen, som nu er stoppet.' if tkinter.messagebox.askretrycancel( 'Der opstod en fejl', error_string, icon=messagebox.ERROR, default=messagebox.CANCEL ): generate() # GUI root = tkinter.Tk() root.wm_title('Randomopgaver') root.resizable(width=False, height=False) # https://commons.wikimedia.org/wiki/File:One_die.jpeg root.iconbitmap(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'icon.ico')) ypad = 5 xpad = 6 l1 = Label(root, text='Word-dokument:') l2 = Label(root, text='Excel-ark:') l3 = Label(root, text='Evt. 
output-mappe:') status = Label(root, text='') l1.grid(row=0, sticky=W, padx=xpad) l2.grid(row=1, sticky=W, padx=xpad) l3.grid(row=2, sticky=W, padx=xpad) status.grid(row=3, columnspan=2, sticky=W, padx=xpad) e1 = Entry(root, width=40) e2 = Entry(root, width=40) e3 = Entry(root, width=40) e1.grid(row=0, column=1, pady=ypad) e2.grid(row=1, column=1, pady=ypad) e3.grid(row=2, column=1, pady=ypad) b1 = Button(root, text='Gennemse...', command=set_doc, width=14) b2 = Button(root, text='Gennemse...', command=set_tab, width=14) b3 = Button(root, text='Gennemse...', command=set_out, width=14) b4 = Button(root, text='Generer opgaver', command=generate, width=14) b1.grid(row=0, column=2, sticky=E, padx=xpad, pady=ypad) b2.grid(row=1, column=2, sticky=E, padx=xpad, pady=ypad) b3.grid(row=2, column=2, sticky=E, padx=xpad, pady=ypad) b4.grid(row=3, column=2, sticky=E, padx=xpad, pady=ypad) root.columnconfigure(1, weight=5) root.rowconfigure(1, weight=5) root.mainloop() ``` #### File: jensjacobt/randomopgaver/randomopgaver.py ```python import sys import os from filegenerator import create_randomopgaver global doc, word, doc_file, row, headers, pattern, name, output_folder def mkdir(path): if not os.path.exists(path): os.mkdir(path) def error_handler(error_string): print(error_string) sys.exit(1) def main(): # Global variables global doc, word, doc_file, row, headers, pattern, name, output_folder # Handle arguments and check existence if len(sys.argv) == 3 or len(sys.argv) == 4: doc_file = os.path.abspath(sys.argv[1]) table_file = os.path.abspath(sys.argv[2]) if not os.path.isfile(doc_file): error_handler('Fejl: Den angivne sti for Word-dokumentet er ikke korrekt.') if not os.path.isfile(table_file): error_handler('Fejl: Den angivne sti for Excel-arket/CSV-filen er ikke korrekt.') else: print('Genererer randomopgaver i PDF-format fra et Word-dokument and en tabelfil.' '\n' '\nrandomopgaver dokument tabelfil [output-mappe]' '\n' '\ndokument Stien til det Word-dokumentet hvori nogle tags skal udskiftes.' '\ntabelfil Stien til en tabelfil med tags og værdier til udskiftning.' '\n Filen kan enten være et Excel-ark eller en TXT-fil.' '\n I tilfælde af en TXT-fil anvendes separator (;), ' '\n citationstegn (") og tegnkodning UTF-16LE.' '\noutput-mappe Mappen hvor de genererede PDF-filer gemmes. Hvis ingen mappe' '\n angives, så vil PDF-filerne gemmes i undermapperne Opgaver og' '\n Svar til mappen, hvori Word-dokumentet ligger. Output-mappen' '\n oprettes, hvis den ikke findes i forvejen.' ) # | return if len(sys.argv) == 4: output_folder = sys.argv[3] else: output_folder = os.path.abspath(os.path.dirname(doc_file)) mkdir(output_folder) create_randomopgaver(print, error_handler, doc_file, table_file, output_folder) main() ```
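The usage text above describes the whole pipeline: a Word template plus a table of tag/value rows are handed to `create_randomopgaver`, which generates the PDFs (into `Opgaver` and `Svar` subfolders when no output folder is given). A small sketch of calling the same entry point directly from Python — the file names are placeholders, and `filegenerator` is the module both scripts import:

```python
import sys

from filegenerator import create_randomopgaver  # same helper the GUI and CLI call

def report_error(msg):
    # Minimal stand-in for the error handlers defined above.
    print(msg)
    sys.exit(1)

# Placeholder paths -- substitute a real template, table file and output folder.
create_randomopgaver(print, report_error, "opgave.docx", "elever.xlsx", "./output")
```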
{ "source": "jen-sjen/data-structures-basics-leetcode", "score": 4 }
#### File: data-structures-basics-leetcode/Arrays/largest-continuous-sum.py ```python def large(arr): if (len(arr)) < 0: return 0 max_sum = current = arr[0] for num in arr[1:]: current = max(current + num, num) max_sum = max(current, max_sum) return max_sum if __name__ == "__main__": print(large([1, -1, 3, -4, 5, -3, 6, -3, 2, -1])) ``` #### File: Leetcode/easy/implement-stacks-using-queues.py ```python class MyStack: def __init__(self): """ Initialize your data structure here. """ self.q = [] self.q2 = [] def push(self, x: int) -> None: """ Push element x onto stack. """ while self.q: self.q2.append(self.q.pop(0)) self.q.append(x) while self.q2: self.q.append(self.q2.pop(0)) def pop(self) -> int: """ Removes the element on top of the stack and returns that element. """ return self.q.pop(0) def top(self) -> int: """ Get the top element. """ return self.q[0] def empty(self) -> bool: """ Returns whether the stack is empty. """ return len(self.q) == 0 # Your MyStack object will be instantiated and called as such: # obj = MyStack() # obj.push(x) # param_2 = obj.pop() # param_3 = obj.top() # param_4 = obj.empty() ``` #### File: Leetcode/easy/majority-element.py ```python class Solution: def majorityElement(self, nums) -> int: candidate = nums[0] count = 0 for x in nums: if candidate == x: count += 1 else: if count != 0: count -= 1 else: candidate = x count = 0 return candidate ``` #### File: Leetcode/easy/number-of-1-bits.py ```python class Solution: def hammingWeight(self, n: int) -> int: return bin(n).count("1") ``` #### File: Leetcode/easy/palindrome-linked-list.py ```python class ListNode: def __init__(self, val=0, next=None): self.val = val self.next = next class Solution: def isPalindrome(self, head: ListNode) -> bool: slow = head fast = head while fast and fast.next: slow = slow.next fast = fast.next.next parent = None cur = head while cur != slow: temp = cur.next cur.next = parent parent = cur cur = temp if fast and not fast.next: slow = slow.next while parent: if parent.val != slow.val: return False parent = parent.next slow = slow.next return True ``` #### File: Leetcode/easy/pascal's triangle.py ```python class Solution(object): def generate(self, numRows): """ :type numRows: int :rtype: List[List[int]] """ # Solution 1: rows = list() if numRows == 0: # handle base case: layer 0 return rows rows.append([1]) if numRows == 1: # handle base case: layer 1 return rows rows.append([1, 1]) if numRows == 2: # handle base case: layer 2 return rows for i in range(3, numRows+1): # here follows the problem's row numbering instead of row indices in list i_row = [None] * i # create row filled with dummy values i_row[0] = i_row[-1] = 1 # initialize the head and tail to 1 for j in range(1, i-1): # iterate through all the middle entries (non head or tail) i_row[j] = rows[i-1-1][j-1]+rows[i-1-1][j] # -1 twice to handle row i has index (i-1) and to refer to previous row rows.append(i_row) return rows ``` #### File: Leetcode/easy/search-insert-position.py ```python def searchInsert(self, nums, target: int) -> int: for i, x in enumerate(nums): if target <= x: return i return len(nums) ``` #### File: Leetcode/easy/single-number.py ```python class Solution: def singleNumber(self, nums) -> int: res = 0 for x in nums: res = res ^ x return res ``` #### File: Leetcode/medium/3sum-closest.py ```python def threeSumClosest(nums, target): res = nums[0] + nums[1] + nums[2] nums = sorted(nums) for i in range(len(nums)-2): left = i + 1 right = len(nums) - 1 while left < right: sums = nums[i] + nums[left] + 
nums[right] if sums < target: left += 1 else: right -= 1 if abs(sums - target) < abs(res - target): res = sums return res ``` #### File: Leetcode/medium/all-paths-from-source-to-target.py ```python class Solution: def allPathsSourceTarget(self, graph): paths = [] currentPath = [0] self.dfs(graph, 0, currentPath, paths) return paths def dfs(self, graph, node, currentPath, paths): if node == len(graph) - 1: paths.append(currentPath[:]) return for neighbor in graph[node]: currentPath.append(neighbor) self.dfs(graph, neighbor, currentPath, paths) currentPath.pop() return ``` #### File: Leetcode/medium/binary-tree-from-postorder-and-inorder.py ```python class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right class Solution: def buildTree(self, inorder, postorder) -> TreeNode: if len(postorder) == 0: return None return self.tree(postorder, inorder) def tree(self, post, inorder): if len(post) < 1: return None if len(post) == 1: return TreeNode(post[0], None, None) root = post[-1] index = inorder.index(root) x = self.tree(post[:index], inorder[:index]) y = self.tree(post[index:len(post)-1], inorder[index+1:]) return TreeNode(root, x, y) ``` #### File: Leetcode/medium/bitwise-and-of-numbers-range.py ```python class Solution: def rangeBitwiseAnd(self, m: int, n: int) -> int: count = 0 while m < n: m = m >> 1 n = n >> 1 count += 1 return m << count ``` #### File: Leetcode/medium/combination-sum-2.py ```python def combinationSum2(candidates, target): candidates.sort() return combo(candidates, target) def combo(candidates, target): if len(candidates) == 0 or target < min(candidates): return [] result = [] if target in candidates: result.append([target]) for i, x in enumerate(candidates): if i > 0 and x == candidates[i - 1]: continue y = combo(candidates[i+1:], target - x) if len(y) == 0: continue for t in y: t.append(x) result.append(t) return result ``` #### File: Leetcode/medium/decode-ways.py ```python class Solution(object): def __init__(self): self.memo = {} def numDecodings(self, s): """ :type s: str :rtype: int """ if not s or s == "0": return 0 return self.findCombinations(s, 0) def findCombinations(self, s, index): if index == len(s): return 1 if s[index] == "0": return 0 if index == len(s) - 1: return 1 if index in self.memo: return self.memo[index] if int(s[index:index+2]) <= 26: output = self.findCombinations(s, index+1) + self.findCombinations(s, index+2) self.memo[index] = output return output else: output = self.findCombinations(s, index+1) self.memo[index] = output return output ``` #### File: Leetcode/medium/divide-two-integers.py ```python def divide(dividend, divisor): quot = 0 neg = 1 if dividend < 0 and divisor > 0: neg = -1 if dividend > 0 and divisor < 0: neg = -1 divisor, dividend = abs(divisor), abs(dividend) div = divisor divi = dividend power = 1 while divi > div: divi -= div power += power div += div while power >= 1: if dividend >= div: dividend -= div quot += power power = power >> 1 div = div >> 1 quot = (quot) * neg if quot < -2147483648: quot = -2147483648 if quot > 2147483647: quot = 2147483647 return quot ``` #### File: Leetcode/medium/elimination-game.py ```python class Solution: def lastRemaining(self, n: int) -> int: k = 0 start = 1 left = True while n > 1: # print("k:",k,"start:",start,"n:",n) if left: start = start + pow(2, k) left = False else: if n % 2 != 0: start = start + pow(2, k) left = True k += 1 n = n // 2 return start ``` #### File: Leetcode/medium/find-the-duplicate-number.py ```python class 
Solution: def findDuplicate(self, nums) -> int: # INEFFICIENT SOLUTION nums.sort() for i in range(len(nums)): if nums[i] == nums[i + 1]: return nums[i] # EFFICIENT SOLUTION O(N) TIME AND O(1) SPACE """def findDuplicate(self, nums): # Find the intersection point of the two runners. tortoise = hare = nums[0] while True: tortoise = nums[tortoise] hare = nums[nums[hare]] if tortoise == hare: break # Find the "entrance" to the cycle. tortoise = nums[0] while tortoise != hare: tortoise = nums[tortoise] hare = nums[hare] return hare""" ``` #### File: Leetcode/medium/fraction-to-recurring-decimal.py ```python class Solution: def fractionToDecimal(self, numerator: int, denominator: int) -> str: pos = False if numerator * denominator >= 0: pos = True numerator = abs(numerator) denominator = abs(denominator) quot = numerator // denominator remainder = numerator % denominator if remainder == 0: if pos: return (str)(quot) else: return "-" + (str)(quot) frac = "" rem = [] i = -1 while True: remainder *= 10 q = remainder // denominator frac += str(q) if remainder in rem: i = rem.index(remainder) break rem.append(remainder) remainder = remainder % denominator if remainder == 0: break res = "" if i == -1: res = str(quot) + "." + frac else: res = str(quot) + "." + frac[:i] + "(" + frac[i:-1] + ")" print(pos) if pos == False: res = "-" + res return res ``` #### File: Leetcode/medium/implement-trie.py ```python class TrieNode: def __init__(self, letter): self.letter = letter self.children = {} self.isEnd = False class Trie: def __init__(self): """ Initialize your data structure here. """ self.root = TrieNode("*") def insert(self, word: str) -> None: """ Inserts a word into the trie. """ cur = self.root for let in word: if let not in cur.children: cur.children[let] = TrieNode(let) cur = cur.children[let] cur.isEnd = True def search(self, word: str) -> bool: """ Returns if the word is in the trie. """ cur = self.root for let in word: if let not in cur.children: return False cur = cur.children[let] return cur.isEnd def startsWith(self, prefix: str) -> bool: """ Returns if there is any word in the trie that starts with the given prefix. """ cur = self.root for let in prefix: if let not in cur.children: return False cur = cur.children[let] return True # Your Trie object will be instantiated and called as such: # obj = Trie() # obj.insert(word) # param_2 = obj.search(word) # param_3 = obj.startsWith(prefix) ``` #### File: Leetcode/medium/inorder-traversal.py ```python class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right class Solution: def __init__(self): self.res = [] def inorderTraversal(self, root: TreeNode): self.inorder(root) return self.res def inorder(self, root): if root: self.inorder(root.left) self.res.append(root.val) self.inorder(root.right) ``` #### File: Leetcode/medium/integer-break.py ```python class Solution: def integerBreak(self, n: int) -> int: if n == 0: return 0 if n == 1: return 1 if n == 2: return 1 if n == 3: return 2 # 0 to 3 are special cases beacuse they will produce a result less than their value. 
We can't use that lesser value in the other calculations known = {0: 0, 1: 1, 2: 2, 3: 3} return self.breakDown(n, known) def breakDown(self, n, known): if n in known: return known[n] else: maximum = 0 for x in range(1, n // 2 + 1): p1 = self.breakDown(x, known) p2 = self.breakDown(n - x, known) maximum = max(maximum, p1 * p2) known[n] = maximum return known[n] ``` #### File: Leetcode/medium/largest-divisible-subset.py ```python class Solution: def largestDivisibleSubset(self, nums): nums.sort() n=len(nums) if n==0: return [] dp=[[i,1] for i in range(n)] last=0 maxm=0 for i in range(1,n): for j in range(i-1,-1,-1): if nums[i]%nums[j]==0 and dp[j][1]>=dp[i][1]: dp[i][1]=dp[j][1]+1 dp[i][0]=j if maxm<dp[i][1]: maxm=dp[i][1] last=i res=[] while dp[last][0]!=last: res.append(nums[last]) last=dp[last][0] res.append(nums[last]) res.reverse() return res ``` #### File: Leetcode/medium/largest-number.py ```python class LargerNumKey(str): """def __lt__(x, y): return x+y > y+x""" class Solution: def largestNumber(self, nums): largest_num = ''.join(sorted(map(str, nums), key=LargerNumKey)) return '0' if largest_num[0] == '0' else largest_num ``` #### File: Leetcode/medium/majority-element-II.py ```python class Solution: def majorityElement(self, nums): cand1 = None cand2 = None count1 = 0 count2 = 0 for num in nums: if num == cand1: count1 += 1 elif num == cand2: count2 += 1 elif count1 == 0: cand1 = num count1 += 1 elif count2 == 0: cand2 = num count2 += 1 else: count1 -= 1 count2 -= 1 count1 = 0 count2 = 0 for num in nums: if num == cand1: count1 += 1 elif num == cand2: count2 += 1 ans = [] if count1 > len(nums)/3: ans.append(cand1) if count2 > len(nums)/3: ans.append(cand2) return ans ``` #### File: Leetcode/medium/maximal-square.py ```python class Solution: def maximalSquare(self, matrix: List[List[str]]) -> int: m = len(matrix) n = len(matrix[0]) table = [[0 for _ in range(n)] for _ in range(m)] maximum = 0 for i in range(m): table[i][0] = int(matrix[i][0]) maximum = max(maximum, table[i][0]) for i in range(n): table[0][i] = int(matrix[0][i]) maximum = max(maximum, table[0][i]) for i in range(1, m): for j in range(1, n): if matrix[i][j] == '0': continue table[i][j] = min(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1]) + 1 maximum = max(maximum, table[i][j]) for row in table: print(row) return maximum ** 2 ``` #### File: Leetcode/medium/maximum-product-subarray.py ```python class Solution: def maxProduct(self, nums) -> int: maxi = nums[0] mini = nums[0] final = nums[0] for x in nums[1:]: temp = maxi maxi = max(max(maxi * x, mini * x), x) mini = min(min(temp * x, mini * x), x) final = max(final, maxi) return final ``` #### File: Leetcode/medium/minimum-path-sum.py ```python def minPathSum(grid): m = len(grid) n = len(grid[0]) known = [[None for _ in range(0, n)] for _ in range(0, m)] return minSum(0, 0, m, n, known, grid) def minSum(posx, posy, m, n, known, grid): if posx == m - 1 and posy == n - 1: known[posx][posy] = grid[posx][posy] return grid[posx][posy] if posx >= m or posy >= n: return float('inf') if known[posx][posy] != None: return known[posx][posy] sum1 = minSum(posx + 1, posy, m, n, known, grid) sum2 = minSum(posx, posy + 1, m, n, known, grid) known[posx][posy] = grid[posx][posy] + min(sum1, sum2) return known[posx][posy] """ # MODIFYING GRID IN PLACE FOR LESSER SPACE COMPLEXITY def minPathSum(self, grid: List[List[int]]) -> int: # the idea is to start from the right-bottom and come up with BFS? # or.. same thing going from left-top to bottom-right.. 
all sub-matrices are sub-problems of the same problem formulation # start from left-top and just add, O(1) space, use the given grid # O(mn) time for i in range(len(grid)): for j in range(len(grid[0])): if i == 0 and j == 0: # if in starting position continue if i - 1 < 0: # if on the top row grid[i][j] += grid[i][j-1] elif j - 1 < 0: # if on the leftmost column grid[i][j] += grid[i-1][j] else: # otherwise grid[i][j] += min(grid[i-1][j], grid[i][j-1]) return grid[-1][-1] """ ``` #### File: Leetcode/medium/minimum-size-subarray-sum.py ```python class Solution: def minSubArrayLen(self, s: int, nums) -> int: minimal = float('inf') left = 0 right = 0 sums = 0 while right < len(nums): sums += nums[right] while sums >= s: if right - left + 1 < minimal: minimal = min(minimal, right - left + 1) sums -= nums[left] left += 1 right += 1 return 0 if minimal == float('inf') else minimal ``` #### File: Leetcode/medium/multiply-two-integers.py ```python def multiply(num1: str, num2: str) -> str: result = "0" k = 0 for x in num2[::-1]: """ # MULTIPLYING IN BRUTE FORCE res = "" carry = 0 for y in num1[::-1]: t = (int(x) * int(y)) + carry res = str(t % 10) + res carry = t // 10 if carry > 0: res = str(carry) + res """ res = str(int(x) * int(num1)) res += "0" * k k += 1 result = str(int(res) + int(result)) """ # ADDING IN BRUTE FORCE m = max(len(res), len(result)) carry2 = 0 s = "" while m > 0: a, b = 0, 0 if len(result) == 0: a = 0 else: a = int(result[-1]) result = result[:-1] if len(res) == 0: b = 0 else: b = int(res[-1]) res = res[:-1] temp = a + b + carry2 carry2 = temp // 10 s = str(temp % 10) + s m -= 1 if carry2 > 0: s = str(carry2) + s result = str(int(s) """ return result ``` #### File: Leetcode/medium/next-permutation.py ```python def nextPermutation(nums): """ Do not return anything, modify nums in-place instead. 
""" if len(nums) == 1: return i = len(nums) - 2 while i >= 0: if nums[i] >= nums[i + 1]: i -= 1 continue else: pos = i + 1 mini = nums[i + 1] pos2 = i + 1 while pos < len(nums): if nums[pos] < mini and nums[pos] > nums[i]: mini = nums[pos] pos2 = pos pos += 1 t = nums[i] nums[i] = nums[pos2] nums[pos2] = t nums[i+1:] = sorted(nums[i+1:]) return nums.sort() return ``` #### File: Leetcode/medium/number-of-islands.py ```python class Solution: def numIslands(self, grid) -> int: count = 0 for i in range(len(grid)): for j in range(len(grid[0])): if grid[i][j] == "1": count += 1 self.traverse(grid, i, j) return count def traverse(self, grid, i, j): #print("i:", i, "j:", j) if i not in range(len(grid)) or j not in range(len(grid[0])): return if grid[i][j] == "0" or grid[i][j] == "K": return grid[i][j] = "K" self.traverse(grid, i + 1, j) self.traverse(grid, i - 1, j) self.traverse(grid, i, j + 1) self.traverse(grid, i, j - 1) ``` #### File: Leetcode/medium/pow.py ```python def myPow(x, n): if n == 0: return 1 elif n < 0: return 1 / myPow(x, abs(n)) elif n % 2 != 0: return x * myPow(x, n-1) else: return myPow(x*x, n/2) ``` #### File: Leetcode/medium/product-of-array-except-self.py ```python class Solution: def productExceptSelf(self, nums): left = [None] * len(nums) left[0] = 1 for i in range(1, len(nums)): left[i] = nums[i - 1] * left[i - 1] r = 1 for i in range(len(nums) - 1, -1, -1): left[i] = left[i] * r r = r * nums[i] return left ``` #### File: Leetcode/medium/rectangle-area.py ```python class Solution: def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int: areaA = (C - A) * (D - B) areaB = (G - E) * (H - F) common = 0 if C <= E or A >= G: common = 0 elif B >= H or D <= F: common = 0 else: x = [A, C, E, G] y = [B, F, D, H] x.sort() y.sort() common = (x[2] - x[1]) * (y[2] - y[1]) return areaA + areaB - common ``` #### File: Leetcode/medium/remove-nth-node-from-end.py ```python class ListNode: def __init__(self, val=0, next=None): self.val = val self.next = next class Solution: def removeNthFromEnd(self, head, n): front = head parent = None end = head while n != 0: end = end.next n -= 1 while end != None: end = end.next parent = front front = front.next if parent == None: return head.next parent.next = front.next return head ``` #### File: Leetcode/medium/reverse-words-in-a-string.py ```python class Solution: def reverseWords(self, s: str) -> str: temp = "" res = "" s += " " for x in s: if x != " ": temp += x else: if temp != "": res = temp + " " + res temp = "" return res[:-1] ``` #### File: Leetcode/medium/set-matrix-zeros.py ```python def setZeroes(matrix): """ Do not return anything, modify matrix in-place instead. """ is_col = False R = len(matrix) C = len(matrix[0]) for i in range(R): # Since first cell for both first row and first column is the same i.e. matrix[0][0] # We can use an additional variable for either the first row/column. # For this solution we are using an additional variable for the first column # and using matrix[0][0] for the first row. if matrix[i][0] == 0: is_col = True for j in range(1, C): # If an element is zero, we set the first element of the corresponding row and column to 0 if matrix[i][j] == 0: matrix[0][j] = 0 matrix[i][0] = 0 # Iterate over the array once again and using the first row and first column, update the elements. 
for i in range(1, R): for j in range(1, C): if not matrix[i][0] or not matrix[0][j]: matrix[i][j] = 0 # See if the first row needs to be set to zero as well if matrix[0][0] == 0: for j in range(C): matrix[0][j] = 0 # See if the first column needs to be set to zero as well if is_col: for i in range(R): matrix[i][0] = 0 ``` #### File: Leetcode/medium/sort-colors.py ```python def sortColors(nums) -> None: """ Do not return anything, modify nums in-place instead. """ dic = {0: 0, 1: 0, 2: 0} for x in nums: if x == 0: dic[0] += 1 elif x == 1: dic[1] += 1 else: dic[2] += 1 nums.clear() nums.extend([0 for _ in range(dic[0])]) nums.extend([1 for _ in range(dic[1])]) nums.extend(2 for _ in range(dic[2])) ``` #### File: Leetcode/medium/spiral-matrix-II.py ```python def generateMatrix(n: int): if n == 0: return [] if n == 1: return [[1]] matrix = [] for i in range(n): matrix.append([]) for j in range(n): matrix[i].append(0) t = 0 m = 1 while n > 1: i = t for j in range(t, n): matrix[i][j] = m m += 1 j = n - 1 for i in range(t+1, n): matrix[i][j] = m m += 1 i = n - 1 for j in range(n - 2, t-1, -1): matrix[i][j] = m m += 1 j = t for i in range(n - 2, t, -1): matrix[i][j] = m m += 1 t += 1 n -= 1 return matrix ``` #### File: Leetcode/medium/subarray-sum-equals-k.py ```python class Solution: # Sliding window solution but doesn't work for some negative values """def subarraySum(self, nums: List[int], k: int) -> int: sum = 0 i, j = 0, 0 c = 0 while j < len(nums): print("i:",i,"j:",j,"sum:",sum) if sum + nums[j] > k: if i >= j: j += 1 continue sum -= nums[i] i += 1 elif sum + nums[j] == k: c += 1 sum += nums[j] j += 1 else: sum += nums[j] j += 1 return c""" # works for all def subarraySum(self, nums, k: int) -> int: # Use defaultdict map = {} map[0] = 1 count, sum = 0, 0 for num in nums: sum += num count += map[sum - k] map[sum] += 1 return count ``` #### File: Leetcode/medium/swap-nodes-in-pairs.py ```python def swapPairs(head): if head == None or head.next == None: return head parent, front = None, head back = head.next res = back while back and front: temp = back.next if parent: parent.next = back back.next = front front.next = temp parent = front front = parent.next if front != None: back = parent.next.next return res ``` #### File: Leetcode/medium/validate-binary-search-tree.py ```python class TreeNode: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right class Solution: def isValidBST(self, root: TreeNode) -> bool: if not root: return True el = float('-inf') cur = root s = [] while True: if cur != None: s.append(cur) cur = cur.left elif len(s) == 0: break else: cur = s.pop() if cur.val <= el: return False el = cur.val cur = cur.right return True ``` #### File: data-structures-basics-leetcode/Linked List/print-nth-to-last-node.py ```python class Node(object): def __init__(self, value): self.value = value self.next = None def reverse(head): prev = None cur = head while cur != None: temp = cur.next cur.next = prev prev = cur cur = temp return prev def printntolast(val, head): head = reverse(head) cur = head while val > 1: cur = cur.next val -= 1 return cur ############################################### #### SOLUTION WITHOUT REVERSING """ def ntolast(n, head): left = head right = head for i in range(n): if not right.next: raise LookupError('ERROR: n is larger than list') right = right.next while right.next: left = left.next right = right.next return left """ ################################################ if __name__ == "__main__": a = Node(1) b = Node(2) 
c = Node(3) d = Node(4) a.next = b b.next = c c.next = d print(printntolast(1, a).value) ``` #### File: data-structures-basics-leetcode/Recursion/combination.py ```python def combination(s): out = set() if len(s) == 1: out = set(s) for i, word in enumerate(s): for p in combination(s[:i] + s[i+1:]): out.add(p) out.add(''.join(sorted(word + p))) return out if __name__ == "__main__": print(sorted(combination("abcd"), key=lambda item: (len(item), item))) ``` #### File: data-structures-basics-leetcode/Recursion/fibonacci.py ```python n = 10 cache = [None] * (n + 1) def fib(n): if n == 0 or n == 1: return n if cache[n] != None: return cache[n] cache[n] = fib(n - 1) + fib(n - 2) return cache[n] if __name__ == "__main__": print(fib(10)) ``` #### File: data-structures-basics-leetcode/Searching-and-sorting/binary-search.py ```python def binary_search_iter(arr, target): left, right = 0, len(arr) - 1 while left < right: mid = (left + right) // 2 if arr[mid] == target: return True elif arr[mid] > target: right = mid - 1 else: left = mid + 1 return False def binary_search_rec(arr, target): left, right = 0, len(arr) if len(arr) < 1: return False mid = (left + right) // 2 if arr[mid] == target: return True elif target < arr[mid]: binary_search_rec(arr[:mid], target) else: binary_search_rec(arr[mid + 1:], target) return False if __name__ == "__main__": print(binary_search_iter([1, 2, 3, 4, 5, 6], 5)) print(binary_search_iter([1, 2, 5, 6], 8)) print(binary_search_rec([1, 2, 5, 6], 3)) print(binary_search_rec([1, 2, 5, 6], 5)) ``` #### File: data-structures-basics-leetcode/Trees/implementing-bst.py ```python class Node(object): def __init__(self, val, parent=None): self.val = val self.leftChild = None self.rightChild = None self.parent = parent def getLeftChild(self): return self.leftChild def getRightChild(self): return self.rightChild def getParent(self): return self.parent def isRoot(self): return self.parent != None def hasLeftChild(self): return self.leftChild != None def hasRightChild(self): return self.rightChild != None def hasBothChildren(self): return self.hasLeftChild() and self.hasRightChild() def hasOneChild(self): return self.hasLeftChild() or self.hasRightChild() class BinarySearchTree(object): def __init__(self): self.size = 0 self.root = None def insert(self, val): if not self.root: self.root = Node(val) else: temp = self.root done = False while not done: if val < temp.val: if temp.hasLeftChild(): temp = temp.getLeftChild() else: temp.leftChild = Node(val, temp) done = True else: if temp.hasRightChild(): temp = temp.getRightChild() else: temp.rightChild = Node(val, temp) done = True self.size += 1 def length(self): return self.size def inorder(self, root): if root: self.inorder(root.leftChild) print(root.val, end=" ") self.inorder(root.rightChild) def get(self, val): if not self.root: return None temp = self.root while True: if temp.val == val: return temp elif val < temp.val: if temp.hasLeftChild(): temp = temp.leftChild else: return None else: if temp.hasRightChild(): temp = temp.rightChild else: return None def findSuccessor(self, node): current = node while current.hasLeftChild(): current = current.leftChild return current def delete(self, val): if not self.root: print("Empty tree.") return if self.get(val) == None: print("No such node.") return temp = self.get(val) if self.length == 1 and temp == self.root: self.root = None return x = temp.parent if not temp.hasOneChild(): print("Inside no child case.") if x.leftChild == temp: # print("No child and left child of parent.") x.leftChild = 
None else: x.rightChild = None elif not temp.hasBothChildren(): print("Inside one child case.") if temp.hasLeftChild(): if x.leftChild == temp: x.leftChild = temp.leftChild else: x.rightChild = temp.leftChild temp.leftChild.parent = x else: if x.leftChild == temp: x.leftChild = temp.rightChild else: x.rightChild = temp.rightChild temp.rightChild.parent = x else: print("Inside both child case.") successor = self.findSuccessor(temp.rightChild) if successor: print("Successor:", successor.val) else: print("No successor") self.delete(successor.val) temp.val = successor.val """ succ_parent = successor.parent if succ_parent.leftChild == successor: succ_parent.leftChild = successor.rightChild else: succ_parent.rightChild = successor.rightChild if x.leftChild == temp: x.leftChild = successor else: x.rightChild = successor successor.leftChild = temp.leftChild successor.rightChild = temp.rightChild """ if __name__ == "__main__": b = BinarySearchTree() b.insert(25) b.insert(15) b.insert(8) b.insert(20) b.insert(17) b.insert(16) b.insert(18) b.insert(40) b.insert(35) b.insert(46) b.insert(42) b.insert(50) b.inorder(b.root) print("Length:", b.length()) print() b.delete(17) b.inorder(b.root) ```
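The `subarraySum` solution above points at the intended approach in its comment ("Use defaultdict") but then builds a plain dict, which raises `KeyError` for unseen prefix sums. A self-contained sketch of the prefix-sum counting it describes (the function name is illustrative):

```python
from collections import defaultdict

def subarray_sum(nums, k):
    # seen[s] = number of prefixes with sum s; every earlier prefix equal to
    # (running - k) contributes one subarray ending at the current index.
    seen = defaultdict(int)
    seen[0] = 1
    running = count = 0
    for num in nums:
        running += num
        count += seen[running - k]
        seen[running] += 1
    return count

print(subarray_sum([1, 1, 1], 2))  # -> 2
```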
{ "source": "jens-j/switcheo-python", "score": 3 }
#### File: switcheo-python/switcheo/streaming_client.py ```python from socketio import ClientNamespace as SocketIOClientNamespace from operator import itemgetter from switcheo.utils import stringify_message, sha1_hash_digest import threading class OrderBooksNamespace(SocketIOClientNamespace): def __init__(self): self.lock = threading.Lock() self.namespace = '/v2/books' self.order_book = {} SocketIOClientNamespace.__init__(self, namespace=self.namespace) def on_connect(self): pass def on_disconnect(self): pass def on_join(self): pass def on_all(self, data): self.lock.acquire() self.order_book[data["room"]["pair"]] = data self.lock.release() digest_hash = data["digest"] book = data["book"] book_digest_hash = sha1_hash_digest(stringify_message(book)) if digest_hash != book_digest_hash: self.emit(event="leave", data=data["room"], namespace='/v2/books') self.emit(event="join", data=data["room"], namespace='/v2/books') def on_updates(self, data): update_digest = data["digest"] update_pair = data["room"]["pair"] update_events = data["events"] buy_event = False sell_event = False if "symbol" in self.order_book[update_pair]["book"]: del self.order_book[update_pair]["book"]["symbol"] self.lock.acquire() for event in update_events: price_match = False event_iteration = 0 if event["side"] == "buy": event_side = "buys" buy_event = True elif event["side"] == "sell": event_side = "sells" sell_event = True event_price = event["price"] event_change = event["delta"] for side in self.order_book[update_pair]["book"][event_side]: if side["price"] == event_price: price_match = True updated_amount = int(side["amount"]) + int(event_change) if updated_amount == 0: self.order_book[update_pair]["book"][event_side].remove(side) else: updated_book = {} updated_book["amount"] = str(updated_amount) updated_book["price"] = str(event_price) self.order_book[update_pair]["book"][event_side][event_iteration] = updated_book break event_iteration += 1 if not price_match: new_book = {} new_book["amount"] = event_change new_book["price"] = event_price self.order_book[update_pair]["book"][event_side].append(new_book) if buy_event and sell_event: self.order_book[update_pair]["book"]["buys"] = sorted( self.order_book[update_pair]["book"]["buys"], key=itemgetter("price"), reverse=True) self.order_book[update_pair]["book"]["sells"] = sorted( self.order_book[update_pair]["book"]["sells"], key=itemgetter("price"), reverse=True) elif buy_event: self.order_book[update_pair]["book"]["buys"] = sorted( self.order_book[update_pair]["book"]["buys"], key=itemgetter("price"), reverse=True) elif sell_event: self.order_book[update_pair]["book"]["sells"] = sorted( self.order_book[update_pair]["book"]["sells"], key=itemgetter("price"), reverse=True) book = self.order_book[update_pair]["book"] self.lock.release() book_digest_hash = sha1_hash_digest(stringify_message(book)) if update_digest != book_digest_hash: self.emit(event="leave", data=data["room"], namespace='/v2/books') self.emit(event="join", data=data["room"], namespace='/v2/books') class TradeEventsNamespace(SocketIOClientNamespace): def __init__(self): self.lock = threading.Lock() self.namespace = '/v2/trades' self.trade_events = {} SocketIOClientNamespace.__init__(self, namespace=self.namespace) def on_connect(self): pass def on_disconnect(self): pass def on_join(self): pass def on_all(self, data): self.lock.acquire() self.trade_events[data["room"]["pair"]] = data self.lock.release() digest_hash = data["digest"] trades = data["trades"] trade_digest_hash = 
sha1_hash_digest(stringify_message(trades)) if digest_hash != trade_digest_hash: self.emit(event="leave", data=data["room"], namespace='/v2/trades') self.emit(event="join", data=data["room"], namespace='/v2/trades') def on_updates(self, data): update_digest = data["digest"] update_pair = data["room"]["pair"] update_events = data["events"] update_limit = data["limit"] self.lock.acquire() self.trade_events[update_pair]["trades"] = update_events + \ self.trade_events[update_pair]["trades"] trade_slice = update_limit - 1 self.trade_events[update_pair]["trades"] = self.trade_events[update_pair]["trades"][0:trade_slice] trades = self.trade_events[update_pair]["trades"] self.lock.release() trade_digest_hash = sha1_hash_digest(stringify_message(trades)) if update_digest != trade_digest_hash: self.emit(event="leave", data=data["room"], namespace='/v2/trades') self.emit(event="join", data=data["room"], namespace='/v2/trades') class OrderEventsNamespace(SocketIOClientNamespace): def __init__(self): self.lock = threading.Lock() self.namespace = '/v2/orders' self.order_events = {} SocketIOClientNamespace.__init__(self, namespace=self.namespace) def on_connect(self): pass def on_disconnect(self): pass def on_join(self): pass def on_all(self, data): self.lock.acquire() self.order_events = data self.lock.release() def on_updates(self, data): update_events = data["events"] self.lock.acquire() self.order_events["orders"] += update_events self.lock.release() ```
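The three namespace classes above only describe how streaming payloads are folded into local state; to receive anything they still have to be registered on a python-socketio client and joined to a room. A minimal wiring sketch follows — the endpoint URL and the join payload are placeholder assumptions, not values taken from this source:

```python
import socketio  # python-socketio

from switcheo.streaming_client import OrderBooksNamespace

sio = socketio.Client()
books = OrderBooksNamespace()
sio.register_namespace(books)  # binds the handlers above to '/v2/books'

# Placeholder endpoint and room payload -- consult the Switcheo API docs
# for the real websocket URL and the exact shape of the join message.
sio.connect("https://example-switcheo-endpoint", namespaces=["/v2/books"])
sio.emit("join", {"contractHash": "<contract_hash>", "pair": "SWTH_NEO"},
         namespace="/v2/books")

# Once the 'all' snapshot has been handled, the locally maintained book
# for the pair lives in books.order_book["SWTH_NEO"]["book"].
```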
{ "source": "jens-k/sleap", "score": 3 }
#### File: gui/dialogs/filedialog.py ```python import os, sys from PySide2 import QtWidgets class FileDialog(object): @classmethod def open(cls, *args, **kwargs): """ Wrapper for `QFileDialog.getOpenFileName()` Uses non-native file dialog if USE_NON_NATIVE_FILE env var set. Passes along everything except empty "options" arg. """ cls._non_native_if_set(kwargs) return QtWidgets.QFileDialog.getOpenFileName(*args, **kwargs) @classmethod def openMultiple(cls, *args, **kwargs): """ Wrapper for `QFileDialog.getOpenFileNames()` Uses non-native file dialog if USE_NON_NATIVE_FILE env var set. Passes along everything except empty "options" arg. """ cls._non_native_if_set(kwargs) return QtWidgets.QFileDialog.getOpenFileNames(*args, **kwargs) @classmethod def save(cls, *args, **kwargs): """Wrapper for `QFileDialog.getSaveFileName()` Uses non-native file dialog if USE_NON_NATIVE_FILE env var set. Passes along everything except empty "options" arg. """ cls._non_native_if_set(kwargs) return QtWidgets.QFileDialog.getSaveFileName(*args, **kwargs) @classmethod def openDir(cls, *args, **kwargs): """Wrapper for `QFileDialog.getExistingDirectory()` Uses non-native file dialog if USE_NON_NATIVE_FILE env var set. Passes along everything except empty "options" arg. """ return QtWidgets.QFileDialog.getExistingDirectory(*args, **kwargs) @staticmethod def _non_native_if_set(kwargs): is_linux = sys.platform.startswith("linux") env_var_set = os.environ.get("USE_NON_NATIVE_FILE", False) if is_linux or env_var_set: kwargs["options"] = kwargs.get("options", 0) kwargs["options"] |= QtWidgets.QFileDialog.DontUseNativeDialog # Make sure we don't send empty options argument if "options" in kwargs and not kwargs["options"]: del kwargs["options"] ``` #### File: sleap/gui/state.py ```python from typing import Any, Callable, Iterable, List, Union GSVarType = str class GuiState(object): """ Class for passing persistent gui state variables. Arbitrary variables can be set, bools can be toggled, and callbacks can be automatically triggered on variable changes. This allows us to separate controls (which set state variables) and views (which can update themselves when the relevant state variables change). 
""" def __init__(self): self._state_vars = dict() self._callbacks = dict() def __getitem__(self, item): return self.get(item, default=None) def __setitem__(self, key: GSVarType, value): old_val = self.get(key, default=object()) self._state_vars[key] = value if old_val != value: self.emit(key) def __contains__(self, item): return item in self._state_vars def __delitem__(self, key: GSVarType): if key in self: del self._state_vars[key] def get(self, key: GSVarType, **kwargs): """Getter with support for default value.""" if "default" in kwargs: return self._state_vars.get(key, kwargs["default"]) return self._state_vars.get(key) def set(self, key: GSVarType, value: Any): """Functional version of setter (for use in lambdas).""" self[key] = value def toggle(self, key: GSVarType, default: bool = False): self[key] = not self.get(key, default=default) def increment(self, key: GSVarType, step: int = 1, mod: int = 1, default: int = 0): if key not in self._state_vars: self[key] = default else: self[key] = (self.get(key) + step) % mod def increment_in_list( self, key: GSVarType, value_list: list, reverse: bool = False ): if self[key] not in value_list: if reverse: self[key] = value_list[-1] else: self[key] = value_list[0] else: idx = value_list.index(self[key]) step = 1 if not reverse else -1 self[key] = value_list[(idx + step) % len(value_list)] def next_int_in_list( self, key: GSVarType, value_list: Iterable[int], reverse=False ): """Advances to subsequent (or prior) value in list. Goes to value subsequent (or prior) to current value, regardless of whether current value is member of list. """ raise NotImplementedError("next_int_in_list not yet implemented!") def connect(self, key: GSVarType, callbacks: Union[Callable, List[Callable]]): """Connects one or more callbacks for state variable.""" if callable(callbacks): self._connect_callback(key, callbacks) else: for callback in callbacks: self._connect_callback(key, callback) def _connect_callback(self, key: GSVarType, callback: Callable): """Connects a callback for state variable.""" if callback is None: raise ValueError("callback cannot be None!") if key not in self._callbacks: self._callbacks[key] = [] self._callbacks[key].append(callback) def emit(self, key: GSVarType): """Triggers callbacks for state variable.""" if key in self._state_vars and key in self._callbacks: val = self.get(key) for i, callback in enumerate(self._callbacks[key]): try: callback(val) except Exception as e: print(f"Error occurred during callback {i} for {key}!") print(self._callbacks[key]) print(e) ``` #### File: sleap/info/labels.py ```python import os from sleap.io.dataset import Labels def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument("data_path", help="Path to labels json file") args = parser.parse_args() video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)]) labels = Labels.load_file(args.data_path, video_search=video_callback) print(f"Labeled frames: {len(labels)}") print(f"Tracks: {len(labels.tracks)}") print(f"Video files:") total_user_frames = 0 for vid in labels.videos: lfs = labels.find(vid) first_idx = min((lf.frame_idx for lf in lfs)) last_idx = max((lf.frame_idx for lf in lfs)) tracks = {inst.track for lf in lfs for inst in lf} concurrent_count = max((len(lf.instances) for lf in lfs)) user_frames = labels.get_labeled_frame_count(vid, "user") total_user_frames += user_frames print(f" {vid.filename}") print(f" labeled frames from {first_idx} to {last_idx}") print(f" labeled frames: {len(lfs)}") print(f" user 
labeled frames: {user_frames}") print(f" tracks: {len(tracks)}") print(f" max instances in frame: {concurrent_count}") print(f"Total user labeled frames: {total_user_frames}") if labels.provenance: print() print(f"Provenance:") for key, value in labels.provenance.items(): print(f" {key}: {value}") if __name__ == "__main__": main() ``` #### File: sleap/info/trackcleaner.py ```python import operator from typing import Text from sleap import Labels from sleap.nn.tracking import TrackCleaner def fit_tracks(filename: Text, instance_count: int): """Wraps `TrackCleaner` for easier cli api.""" labels = Labels.load_file(filename) video = labels.videos[0] frames = labels.find(video) TrackCleaner(instance_count=instance_count).run(frames=frames) # Rebuild list of tracks labels.tracks = list( { instance.track for frame in labels for instance in frame.instances if instance.track } ) labels.tracks.sort(key=operator.attrgetter("spawned_on", "name")) # Save new file save_filename = filename save_filename = save_filename.replace(".slp", ".cleaned.slp") save_filename = save_filename.replace(".h5", ".cleaned.h5") save_filename = save_filename.replace(".json", ".cleaned.json") Labels.save_file(labels, save_filename) print(f"Saved: {save_filename}") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("data_path", help="Path to labels json file") parser.add_argument( "-c", "--instance_count", type=int, default=2, help="Count of instances to keep in each frame", ) args = parser.parse_args() fit_tracks(filename=args.data_path, instance_count=args.instance_count) # print(args) ``` #### File: io/format/coco.py ```python import os import numpy as np from sleap import Labels, Video, Skeleton from sleap.gui.dialogs.missingfiles import MissingFilesDialog from sleap.instance import Instance, LabeledFrame, Point, Track from .adaptor import Adaptor, SleapObjectType from .filehandle import FileHandle class LabelsCocoAdaptor(Adaptor): @property def handles(self): return SleapObjectType.labels @property def default_ext(self): return "json" @property def all_exts(self): return ["json"] @property def name(self): return "COCO Dataset JSON" def can_read_file(self, file: FileHandle): if not self.does_match_ext(file.filename): return False if not file.is_json: return False if "annotations" not in file.json: return False if "categories" not in file.json: return False return True def can_write_filename(self, filename: str): return False def does_read(self) -> bool: return True def does_write(self) -> bool: return False @classmethod def read( cls, file: FileHandle, img_dir: str, use_missing_gui: bool = False, *args, **kwargs, ) -> Labels: dicts = file.json # Make skeletons from "categories" skeleton_map = dict() for category in dicts["categories"]: skeleton = Skeleton(name=category["name"]) skeleton_id = category["id"] node_names = category["keypoints"] skeleton.add_nodes(node_names) try: for src_idx, dst_idx in category["skeleton"]: skeleton.add_edge(node_names[src_idx], node_names[dst_idx]) except IndexError as e: # According to the COCO data format specifications[^1], the edges # are supposed to be 1-indexed. But in some of their own # dataset the edges are 1-indexed! So we'll try. 
# [1]: http://cocodataset.org/#format-data # Clear any edges we already created using 0-indexing skeleton.clear_edges() # Add edges for src_idx, dst_idx in category["skeleton"]: skeleton.add_edge(node_names[src_idx - 1], node_names[dst_idx - 1]) skeleton_map[skeleton_id] = skeleton # Make videos from "images" # Remove images that aren't referenced in the annotations img_refs = [annotation["image_id"] for annotation in dicts["annotations"]] dicts["images"] = list(filter(lambda im: im["id"] in img_refs, dicts["images"])) # Key in JSON file should be "file_name", but sometimes it's "filename", # so we have to check both. img_filename_key = "file_name" if img_filename_key not in dicts["images"][0].keys(): img_filename_key = "filename" # First add the img_dir to each image filename img_paths = [ os.path.join(img_dir, image[img_filename_key]) for image in dicts["images"] ] # See if there are any missing files img_missing = [not os.path.exists(path) for path in img_paths] if sum(img_missing): if use_missing_gui: okay = MissingFilesDialog(img_paths, img_missing).exec_() if not okay: return None else: raise FileNotFoundError( f"Images for COCO dataset could not be found in {img_dir}." ) # Update the image paths (with img_dir or user selected path) for image, path in zip(dicts["images"], img_paths): image[img_filename_key] = path # Create the video objects for the image files image_video_map = dict() vid_id_video_map = dict() for image in dicts["images"]: image_id = image["id"] image_filename = image[img_filename_key] # Sometimes images have a vid_id which links multiple images # together as one video. If so, we'll use that as the video key. # But if there isn't a vid_id, we'll treat each images as a # distinct video and use the image id as the video id. vid_id = image.get("vid_id", image_id) if vid_id not in vid_id_video_map: kwargs = dict(filenames=[image_filename]) for key in ("width", "height"): if key in image: kwargs[key] = image[key] video = Video.from_image_filenames(**kwargs) vid_id_video_map[vid_id] = video frame_idx = 0 else: video = vid_id_video_map[vid_id] frame_idx = video.num_frames video.backend.filenames.append(image_filename) image_video_map[image_id] = (video, frame_idx) # Make instances from "annotations" lf_map = dict() track_map = dict() for annotation in dicts["annotations"]: skeleton = skeleton_map[annotation["category_id"]] image_id = annotation["image_id"] video, frame_idx = image_video_map[image_id] keypoints = np.array(annotation["keypoints"], dtype="int").reshape(-1, 3) track = None if "track_id" in annotation: track_id = annotation["track_id"] if track_id not in track_map: track_map[track_id] = Track(frame_idx, str(track_id)) track = track_map[track_id] points = dict() any_visible = False for i in range(len(keypoints)): node = skeleton.nodes[i] x, y, flag = keypoints[i] if flag == 0: # node not labeled for this instance continue is_visible = flag == 2 any_visible = any_visible or is_visible points[node] = Point(x, y, is_visible) if points: # If none of the points had 2 has the "visible" flag, we'll # assume this incorrect and just mark all as visible. 
if not any_visible: for point in points.values(): point.visible = True inst = Instance(skeleton=skeleton, points=points, track=track) if image_id not in lf_map: lf_map[image_id] = LabeledFrame(video, frame_idx) lf_map[image_id].insert(0, inst) return Labels(labeled_frames=list(lf_map.values())) ``` #### File: sleap/io/visuals.py ```python from sleap.io.video import Video from sleap.io.videowriter import VideoWriter from sleap.io.dataset import Labels from sleap.util import usable_cpu_count import cv2 import os import numpy as np import math from time import time, clock from typing import Iterable, List, Tuple from queue import Queue from threading import Thread import logging logger = logging.getLogger(__name__) # Object that signals shutdown _sentinel = object() def reader(out_q: Queue, video: Video, frames: List[int], scale: float = 1.0): """Read frame images from video and send them into queue. Args: out_q: Queue to send (list of frame indexes, ndarray of frame images) for chunks of video. video: The `Video` object to read. frames: Full list frame indexes we want to read. scale: Output scale for frame images. Returns: None. """ cv2.setNumThreads(usable_cpu_count()) total_count = len(frames) chunk_size = 64 chunk_count = math.ceil(total_count / chunk_size) logger.info(f"Chunks: {chunk_count}, chunk size: {chunk_size}") i = 0 for chunk_i in range(chunk_count): # Read the next chunk of frames frame_start = chunk_size * chunk_i frame_end = min(frame_start + chunk_size, total_count) frames_idx_chunk = frames[frame_start:frame_end] t0 = clock() # Safely load frames from video, skipping frames we can't load loaded_chunk_idxs, video_frame_images = video.get_frames_safely( frames_idx_chunk ) if scale != 1.0: video_frame_images = resize_images(video_frame_images, scale) elapsed = clock() - t0 fps = len(loaded_chunk_idxs) / elapsed logger.debug(f"reading chunk {i} in {elapsed} s = {fps} fps") i += 1 out_q.put((loaded_chunk_idxs, video_frame_images)) # send _sentinal object into queue to signal that we're done out_q.put(_sentinel) def marker(in_q: Queue, out_q: Queue, labels: Labels, video_idx: int, scale: float): """Annotate frame images (draw instances). Args: in_q: Queue with (list of frame indexes, ndarray of frame images). out_q: Queue to send annotated images as (images, h, w, channels) ndarray. labels: the `Labels` object from which to get data for annotating. video_idx: index of `Video` in `labels.videos` list. Returns: None. """ cv2.setNumThreads(usable_cpu_count()) chunk_i = 0 while True: data = in_q.get() if data is _sentinel: # no more data to be received so stop in_q.put(_sentinel) break frames_idx_chunk, video_frame_images = data t0 = clock() imgs = mark_images( frame_indices=frames_idx_chunk, frame_images=video_frame_images, video_idx=video_idx, labels=labels, scale=scale, ) elapsed = clock() - t0 fps = len(imgs) / elapsed logger.debug(f"drawing chunk {chunk_i} in {elapsed} s = {fps} fps") chunk_i += 1 out_q.put(imgs) # send _sentinal object into queue to signal that we're done out_q.put(_sentinel) def writer( in_q: Queue, progress_queue: Queue, filename: str, fps: float, img_w_h: Tuple[int, int], ): """Write annotated images to video. Args: in_q: Queue with annotated images as (images, h, w, channels) ndarray progress_queue: Queue to send progress as (total frames written: int, elapsed time: float). Send (-1, elapsed time) when done. 
filename: full path to output video fps: frames per second for output video img_w_h: (w, h) for output video (note width first for opencv) Returns: None. """ cv2.setNumThreads(usable_cpu_count()) w, h = img_w_h writer_object = VideoWriter.safe_builder(filename, height=h, width=w, fps=fps) start_time = clock() total_elapsed = 0 total_frames_written = 0 i = 0 while True: data = in_q.get() if data is _sentinel: # no more data to be received so stop in_q.put(_sentinel) break t0 = clock() for img in data: writer_object.add_frame(img, bgr=True) elapsed = clock() - t0 fps = len(data) / elapsed logger.debug(f"writing chunk {i} in {elapsed} s = {fps} fps") i += 1 total_frames_written += len(data) total_elapsed = clock() - start_time progress_queue.put((total_frames_written, total_elapsed)) writer_object.close() # send (-1, time) to signal done progress_queue.put((-1, total_elapsed)) def save_labeled_video( filename: str, labels: Labels, video: Video, frames: Iterable[int], fps: int = 15, scale: float = 1.0, gui_progress: bool = False, ): """Function to generate and save video with annotations. Args: filename: Output filename. labels: The dataset from which to get data. video: The source :class:`Video` we want to annotate. frames: List of frames to include in output video. fps: Frames per second for output video. gui_progress: Whether to show Qt GUI progress dialog. Returns: None. """ print(f"Writing video with {len(frames)} frame images...") output_width_height = (int(video.width * scale), int(video.height * scale)) t0 = clock() q1 = Queue(maxsize=10) q2 = Queue(maxsize=10) progress_queue = Queue() thread_read = Thread(target=reader, args=(q1, video, frames, scale)) thread_mark = Thread( target=marker, args=(q1, q2, labels, labels.videos.index(video), scale) ) thread_write = Thread( target=writer, args=(q2, progress_queue, filename, fps, output_width_height), ) thread_read.start() thread_mark.start() thread_write.start() progress_win = None if gui_progress: from PySide2 import QtWidgets, QtCore progress_win = QtWidgets.QProgressDialog( f"Generating video with {len(frames)} frames...", "Cancel", 0, len(frames) ) progress_win.setMinimumWidth(300) progress_win.setWindowModality(QtCore.Qt.WindowModal) while True: frames_complete, elapsed = progress_queue.get() if frames_complete == -1: break if progress_win is not None and progress_win.wasCanceled(): break fps = frames_complete / elapsed remaining_frames = len(frames) - frames_complete remaining_time = remaining_frames / fps if gui_progress: progress_win.setValue(frames_complete) else: print( f"Finished {frames_complete} frames in {elapsed} s, fps = {fps}, approx {remaining_time} s remaining" ) elapsed = clock() - t0 fps = len(frames) / elapsed print(f"Done in {elapsed} s, fps = {fps}.") def mark_images(frame_indices, frame_images, video_idx, labels, scale): imgs = [] for i, frame_idx in enumerate(frame_indices): img = get_frame_image( video_frame=frame_images[i], video_idx=video_idx, frame_idx=frame_idx, labels=labels, scale=scale, ) imgs.append(img) return imgs def get_frame_image( video_frame: np.ndarray, video_idx: int, frame_idx: int, labels: Labels, scale: float, ) -> np.ndarray: """Returns single annotated frame image. Args: video_frame: The ndarray of the frame image. video_idx: Index of video in :attribute:`Labels.videos` list. frame_idx: Index of frame in video. labels: The dataset from which to get data. Returns: ndarray of frame image with visual annotations added. 
""" # Use OpenCV to convert to BGR color image video_frame = img_to_cv(video_frame) # Add the instances to the image plot_instances_cv(video_frame, video_idx, frame_idx, labels, scale) return video_frame def img_to_cv(img: np.ndarray) -> np.ndarray: """Prepares frame image as needed for opencv.""" # Convert RGB to BGR for OpenCV if img.shape[-1] == 3: img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # Convert grayscale to BGR elif img.shape[-1] == 1: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) return img def plot_instances_cv( img: np.ndarray, video_idx: int, frame_idx: int, labels: Labels, scale: float ) -> np.ndarray: """Adds visuals annotations to single frame image. Args: img: The ndarray of the frame image. video_idx: Index of video in :attribute:`Labels.videos` list. frame_idx: Index of frame in video. labels: The dataset from which to get data. Returns: ndarray of frame image with visual annotations added. """ cmap = [ [0, 114, 189], [217, 83, 25], [237, 177, 32], [126, 47, 142], [119, 172, 48], [77, 190, 238], [162, 20, 47], ] lfs = labels.find(labels.videos[video_idx], frame_idx) if len(lfs) == 0: return count_no_track = 0 for i, instance in enumerate(lfs[0].instances_to_show): if instance.track in labels.tracks: track_idx = labels.tracks.index(instance.track) else: # Instance without track track_idx = len(labels.tracks) + count_no_track count_no_track += 1 # Get color for instance and convert RGB to BGR for OpenCV inst_color = cmap[track_idx % len(cmap)][::-1] plot_instance_cv(img, instance, inst_color, scale=scale) def has_nans(*vals): return any((np.isnan(val) for val in vals)) def plot_instance_cv( img: np.ndarray, instance: "Instance", color: Iterable[int], unscaled_marker_radius: float = 4, scale: float = 1.0, ) -> np.ndarray: """ Add visual annotations for single instance. Args: img: The ndarray of the frame image. instance: The :class:`Instance` to add to frame image. color: (r, g, b) color for this instance. unscaled_marker_radius: Radius of marker for instance points (nodes). scale: Returns: ndarray of frame image with visual annotations for instance added. """ # Get matrix of all point locations points_array = instance.points_array # Rescale point locations points_array *= scale marker_radius = max(1, int(unscaled_marker_radius // (1 / scale))) for x, y in points_array: # Make sure this is a valid and visible point if not has_nans(x, y): # Convert to ints for opencv (now that we know these aren't nans) x, y = int(x), int(y) # Draw circle to mark node cv2.circle( img, (x, y), marker_radius, color, lineType=cv2.LINE_AA, ) for (src, dst) in instance.skeleton.edge_inds: # Get points for the nodes connected by this edge src_x, src_y = points_array[src] dst_x, dst_y = points_array[dst] # Make sure that both nodes are present in this instance before drawing edge if not has_nans(src_x, src_y, dst_x, dst_y): # Convert to ints for opencv src_x, src_y = int(src_x), int(src_y) dst_x, dst_y = int(dst_x), int(dst_y) # Draw line to mark edge between nodes cv2.line( img, (src_x, src_y), (dst_x, dst_y), color, lineType=cv2.LINE_AA, ) def resize_image(img: np.ndarray, scale: float) -> np.ndarray: """Resizes single image with shape (height, width, channels).""" height, width, channels = img.shape new_height, new_width = int(height // (1 / scale)), int(width // (1 / scale)) # Note that OpenCV takes shape as (width, height). 
if channels == 1: # opencv doesn't want a single channel to have its own dimension img = cv2.resize(img[:, :], (new_width, new_height))[..., None] else: img = cv2.resize(img, (new_width, new_height)) return img def resize_images(images: np.ndarray, scale: float) -> np.ndarray: return np.stack([resize_image(img, scale) for img in images]) def main_cli(): import argparse from sleap.util import frame_list parser = argparse.ArgumentParser() parser.add_argument("data_path", help="Path to labels json file") parser.add_argument( "-o", "--output", type=str, default=None, help="The output filename for the video", ) parser.add_argument("-f", "--fps", type=int, default=15, help="Frames per second") parser.add_argument("--scale", type=float, default=1.0, help="Output image scale") parser.add_argument( "--frames", type=frame_list, default="", help="list of frames to predict. Either comma separated list (e.g. 1,2,3) or " "a range separated by hyphen (e.g. 1-3). (default is entire video)", ) parser.add_argument( "--video-index", type=int, default=0, help="Index of video in labels dataset" ) args = parser.parse_args() labels = Labels.load_file( args.data_path, video_search=[os.path.dirname(args.data_path)] ) if args.video_index >= len(labels.videos): raise IndexError(f"There is no video with index {args.video_index}.") vid = labels.videos[args.video_index] if args.frames is None: frames = sorted([lf.frame_idx for lf in labels if len(lf.instances)]) else: frames = args.frames filename = args.output or args.data_path + ".avi" save_labeled_video( filename=filename, labels=labels, video=vid, frames=frames, fps=args.fps, scale=args.scale, ) print(f"Video saved as: {filename}") if __name__ == "__main__": main_cli() ``` #### File: tests/io/test_visuals.py ```python import numpy as np import os from sleap.io.visuals import save_labeled_video, resize_images, mark_images, VideoWriter def test_resize(small_robot_mp4_vid): imgs = small_robot_mp4_vid[:4] resized_imgs = resize_images(imgs, 0.25) assert resized_imgs.shape[0] == imgs.shape[0] assert resized_imgs.shape[1] == imgs.shape[1] // 4 assert resized_imgs.shape[2] == imgs.shape[2] // 4 assert resized_imgs.shape[3] == imgs.shape[3] def test_serial_pipeline(centered_pair_predictions, tmpdir): frames = [0, 1, 2] video_idx = 0 scale = 0.25 out_path = os.path.join(tmpdir, "clip.avi") video = centered_pair_predictions.videos[video_idx] frame_images = video.get_frames(frames) # Make sure we can resize small_images = resize_images(frame_images, scale=scale) _, height, width, _ = small_images.shape assert height == video.height // (1 / scale) assert width == video.width // (1 / scale) # Make sure we can mark images marked_image_list = mark_images( frame_indices=frames, frame_images=small_images, video_idx=video_idx, labels=centered_pair_predictions, scale=scale, ) marked_point = ( centered_pair_predictions.find(video, frames[0])[0].instances[0].points[0] ) # There's a point at 201, 186 (i.e. 
50.25, 46.5), so make sure it got marked assert not np.allclose( marked_image_list[0][44:48, 48:52, 0], small_images[0, 44:48, 48:52, 0] ) # Make sure no change where nothing marked assert np.allclose( marked_image_list[0][10:20, :10, 0], small_images[0, 10:20, :10, 0] ) def test_write_visuals(tmpdir, centered_pair_predictions): path = os.path.join(tmpdir, "clip.avi") save_labeled_video( filename=path, labels=centered_pair_predictions, video=centered_pair_predictions.videos[0], frames=(0, 1, 2), fps=15, ) assert os.path.exists(path) ``` #### File: nn/architectures/test_upsampling.py ```python import numpy as np import tensorflow as tf from sleap.nn.system import use_cpu_only; use_cpu_only() # hide GPUs for test from sleap.nn.architectures import upsampling from sleap.nn.config import UpsamplingConfig class UpsamplingTests(tf.test.TestCase): def test_upsampling_stack(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=4, upsampling_stride=2, transposed_conv=True, transposed_conv_batchnorm=True, refine_convs=1, refine_convs_batchnorm=True, ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((8, 8, 32)), current_stride=16 ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertAllEqual(x.shape, (None, 32, 32, 64)) self.assertEqual(len(intermediate_feats), 2) self.assertEqual(intermediate_feats[0].stride, 8) self.assertEqual(intermediate_feats[1].stride, 4) self.assertEqual(len(model.layers), 13) self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2DTranspose) def test_upsampling_stack_transposed_filter_rate(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=2, upsampling_stride=2, transposed_conv=True, transposed_conv_filters=16, transposed_conv_filters_rate=2, refine_convs=0, ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((4, 4, 2)), current_stride=16 ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertEqual(model.get_layer("upsample_s16_to_s8_trans_conv").filters, 16) self.assertEqual(model.get_layer("upsample_s8_to_s4_trans_conv").filters, 32) self.assertEqual(model.get_layer("upsample_s4_to_s2_trans_conv").filters, 64) self.assertAllEqual(x.shape, (None, 32, 32, 64)) def test_upsampling_stack_transposed_filter_rate_shrink(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=2, upsampling_stride=2, transposed_conv=True, transposed_conv_filters=128, transposed_conv_filters_rate=0.5, refine_convs=0, ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((4, 4, 2)), current_stride=16 ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertEqual(model.get_layer("upsample_s16_to_s8_trans_conv").filters, 128) self.assertEqual(model.get_layer("upsample_s8_to_s4_trans_conv").filters, 64) self.assertEqual(model.get_layer("upsample_s4_to_s2_trans_conv").filters, 32) self.assertAllEqual(x.shape, (None, 32, 32, 32)) def test_upsampling_stack_refine_convs_filter_rate(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=2, upsampling_stride=2, transposed_conv=False, refine_convs=2, refine_convs_filters=16, refine_convs_filters_rate=2, ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((4, 4, 2)), current_stride=16 ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertEqual(model.get_layer("upsample_s16_to_s8_refine0_conv").filters, 16) self.assertEqual(model.get_layer("upsample_s16_to_s8_refine1_conv").filters, 16) 
self.assertEqual(model.get_layer("upsample_s8_to_s4_refine0_conv").filters, 32) self.assertEqual(model.get_layer("upsample_s8_to_s4_refine1_conv").filters, 32) self.assertEqual(model.get_layer("upsample_s4_to_s2_refine0_conv").filters, 64) self.assertEqual(model.get_layer("upsample_s4_to_s2_refine1_conv").filters, 64) self.assertAllEqual(x.shape, (None, 32, 32, 64)) def test_upsampling_stack_upsampling_stride4(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=4, upsampling_stride=4 ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((8, 8, 32)), current_stride=16 ) self.assertAllEqual(x.shape, (None, 32, 32, 64)) self.assertEqual(len(intermediate_feats), 1) def test_upsampling_stack_upsampling_interp(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=8, upsampling_stride=2, transposed_conv=False ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((8, 8, 32)), current_stride=16 ) self.assertAllEqual(x.shape, (None, 16, 16, 64)) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertIsInstance(model.layers[1], tf.keras.layers.UpSampling2D) def test_upsampling_stack_upsampling_skip(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=2, upsampling_stride=2, skip_add=False, transposed_conv=True, transposed_conv_filters=16, refine_convs=0, ) skip_sources = [ upsampling.IntermediateFeature( tensor=tf.keras.Input((16, 16, 1)), stride=8 ), upsampling.IntermediateFeature( tensor=tf.keras.Input((32, 32, 2)), stride=4 ), ] x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((8, 8, 32)), current_stride=16, skip_sources=skip_sources ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertAllEqual(x.shape, (None, 64, 64, 16)) self.assertEqual(len(intermediate_feats), 3) self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2DTranspose) self.assertIsInstance(model.layers[2], tf.keras.layers.BatchNormalization) self.assertIsInstance(model.layers[4], tf.keras.layers.Activation) self.assertIsInstance(model.layers[5], tf.keras.layers.Concatenate) self.assertAllEqual(model.layers[5].output.shape, (None, 16, 16, 17)) self.assertIsInstance(model.layers[10], tf.keras.layers.Concatenate) self.assertAllEqual(model.layers[10].output.shape, (None, 32, 32, 18)) def test_upsampling_stack_upsampling_add(self): upsampling_stack = upsampling.UpsamplingStack( output_stride=2, upsampling_stride=2, skip_add=True, transposed_conv=True, transposed_conv_filters=16, refine_convs=0, ) skip_sources = [ upsampling.IntermediateFeature( tensor=tf.keras.Input((16, 16, 1)), stride=8 ), upsampling.IntermediateFeature( tensor=tf.keras.Input((32, 32, 2)), stride=4 ), ] x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((8, 8, 32)), current_stride=16, skip_sources=skip_sources ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertAllEqual(x.shape, (None, 64, 64, 16)) self.assertEqual(len(intermediate_feats), 3) self.assertAllEqual( model.get_layer("upsample_s16_to_s8_skip_conv1x1").output.shape, (None, 16, 16, 16), ) self.assertAllEqual( model.get_layer("upsample_s8_to_s4_skip_conv1x1").output.shape, (None, 32, 32, 16), ) self.assertIsInstance( model.get_layer("upsample_s16_to_s8_skip_add"), tf.keras.layers.Add ) def test_upsampling_stack(self): upsampling_stack = upsampling.UpsamplingStack.from_config(UpsamplingConfig( method="transposed_conv", skip_connections="concatenate", block_stride=2, filters=64, filters_rate=1., refine_convs=1, batch_norm=True, 
transposed_conv_kernel_size=4, ), output_stride=4 ) x, intermediate_feats = upsampling_stack.make_stack( tf.keras.Input((8, 8, 32)), current_stride=16 ) model = tf.keras.Model(tf.keras.utils.get_source_inputs(x), x) self.assertAllEqual(x.shape, (None, 32, 32, 64)) self.assertEqual(len(intermediate_feats), 2) self.assertEqual(intermediate_feats[0].stride, 8) self.assertEqual(intermediate_feats[1].stride, 4) self.assertEqual(len(model.layers), 13) self.assertIsInstance(model.layers[1], tf.keras.layers.Conv2DTranspose) ``` #### File: tests/nn/test_tracker_nms.py ```python import numpy as np from sleap.nn.tracking import nms_fast, nms_instances from sleap.instance import PredictedInstance from sleap.skeleton import Skeleton def test_nms(): boxes = np.array( [[10, 10, 20, 20], [10, 10, 15, 15], [30, 30, 40, 40], [32, 32, 42, 42]] ) scores = np.array([1, 0.3, 1, 0.5]) picks = nms_fast(boxes, scores, iou_threshold=0.5) assert sorted(picks) == [0, 2] def test_nms_with_target(): boxes = np.array( [[10, 10, 20, 20], [10, 10, 15, 15], [30, 30, 40, 40], [32, 32, 42, 42]] ) # Box 1 is suppressed and has lowest score scores = np.array([1, 0.3, 1, 0.5]) picks = nms_fast(boxes, scores, iou_threshold=0.5, target_count=3) assert sorted(picks) == [0, 2, 3] # Box 3 is suppressed and has lowest score scores = np.array([1, 0.5, 1, 0.3]) picks = nms_fast(boxes, scores, iou_threshold=0.5, target_count=3) assert sorted(picks) == [0, 1, 2] def test_nms_instances_to_remove(): skeleton = Skeleton() skeleton.add_nodes(("a", "b")) instances = [] inst = PredictedInstance(skeleton=skeleton) inst["a"].x = 10 inst["a"].y = 10 inst["b"].x = 20 inst["b"].y = 20 inst.score = 1 instances.append(inst) inst = PredictedInstance(skeleton=skeleton) inst["a"].x = 10 inst["a"].y = 10 inst["b"].x = 15 inst["b"].y = 15 inst.score = 0.3 instances.append(inst) inst = PredictedInstance(skeleton=skeleton) inst["a"].x = 30 inst["a"].y = 30 inst["b"].x = 40 inst["b"].y = 40 inst.score = 1 instances.append(inst) inst = PredictedInstance(skeleton=skeleton) inst["a"].x = 32 inst["a"].y = 32 inst["b"].x = 42 inst["b"].y = 42 inst.score = 0.5 instances.append(inst) to_keep, to_remove = nms_instances(instances, iou_threshold=0.5, target_count=3) assert len(to_remove) == 1 assert to_remove[0].matches(instances[1]) ```
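The two NMS tests at the end of this entry pin down the contract of `nms_fast` (whose implementation lives in `sleap.nn.tracking` and is not part of this excerpt): greedy suppression of overlapping boxes by score, plus a `target_count` floor that restores the best-scoring suppressed boxes when too few survive. The sketch below is only a reference reimplementation of that contract inferred from the tests — note that the roughly 0.47-IoU pair in `test_nms` is still suppressed, which suggests overlap is measured against the candidate box's own area (Malisiewicz-style) rather than strict IoU:

```python
import numpy as np

def nms_fast_sketch(boxes, scores, iou_threshold=0.5, target_count=None):
    """Reference sketch of the behaviour asserted by the tests above.

    boxes: (N, 4) rows of (x1, y1, x2, y2); scores: (N,). Returns kept indices.
    Not sleap's actual implementation.
    """
    boxes = np.asarray(boxes, dtype=float)
    scores = np.asarray(scores, dtype=float)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    order = np.argsort(scores)[::-1]  # highest score first
    keep, dropped = [], []
    while order.size:
        i, rest = order[0], order[1:]
        keep.append(int(i))
        # Overlap of the kept box with each remaining box, measured against
        # the remaining box's own area (not strict IoU).
        xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        overlap = inter / areas[rest]
        dropped.extend(int(j) for j in rest[overlap > iou_threshold])
        order = rest[overlap <= iou_threshold]
    if target_count is not None and len(keep) < target_count:
        # Bring back the best-scoring suppressed boxes until the target is met.
        dropped.sort(key=lambda j: scores[j], reverse=True)
        keep.extend(dropped[: target_count - len(keep)])
    return keep
```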
{ "source": "jenskutilek/drawbot-skia", "score": 3 }
#### File: src/drawbot_skia/document.py ```python from abc import ABC, abstractmethod import logging import os import pathlib import tempfile import skia class Document(ABC): # pageWidth # pageHeight @property @abstractmethod def isDrawing(self): return ... @abstractmethod def beginPage(self, width: int, height: int) -> skia.Canvas: return ... @abstractmethod def endPage(self): ... @abstractmethod def setFrameDuration(self, duration): ... @abstractmethod def saveImage(self, path, **kwargs): ... DEFAULT_FRAMEDURATION = 1/10 class RecordingDocument(Document): def __init__(self): self._pictures = [] self._frameDurations = [] self._currentRecorder = None self._currentFrameDuration = DEFAULT_FRAMEDURATION self.pageWidth = self.pageHeight = None @property def isDrawing(self): return self._currentRecorder is not None def beginPage(self, width, height): assert self._currentRecorder is None self.pageWidth = width self.pageHeight = height self._currentRecorder = skia.PictureRecorder() return self._currentRecorder.beginRecording(width, height) def endPage(self): self._pictures.append(self._currentRecorder.finishRecordingAsPicture()) self._frameDurations.append(self._currentFrameDuration) self._currentRecorder = None self._currentFrameDuration = DEFAULT_FRAMEDURATION self.pageWidth = self.pageHeight = None def setFrameDuration(self, duration): self._currentFrameDuration = duration def saveImage(self, path, **kwargs): path = pathlib.Path(path).resolve() suffix = path.suffix.lower().lstrip(".") methodName = f"_saveImage_{suffix}" method = getattr(self, methodName, None) if method is None: raise ValueError(f"unsupported file type: {suffix}") method(path, **kwargs) def _saveImage_pdf(self, path, **kwargs): stream = skia.FILEWStream(os.fspath(path)) with skia.PDF.MakeDocument(stream) as document: for picture in self._pictures: x, y, width, height = picture.cullRect() assert x == 0 and y == 0 with document.page(width, height) as canvas: canvas.drawPicture(picture) stream.flush() def _saveImage_svg(self, path, **kwargs): for picture, framePath in _iteratePictures(self._pictures, path): x, y, width, height = picture.cullRect() assert x == 0 and y == 0 stream = skia.FILEWStream(os.fspath(framePath)) canvas = skia.SVGCanvas.Make((width, height), stream) canvas.drawPicture(picture) del canvas stream.flush() def _saveImage_png(self, path, **kwargs): _savePixelImages(self._pictures, path, skia.kPNG) def _saveImage_jpeg(self, path, **kwargs): _savePixelImages(self._pictures, path, skia.kJPEG, whiteBackground=True) _saveImage_jpg = _saveImage_jpeg def _saveImage_mp4(self, path, codec="libx264", **kwargs): from .ffmpeg import generateMP4 if not self._pictures: # Empty mp4? 
return frameRate = max(1, round(1 / self._frameDurations[-1])) if len(set(self._frameDurations)) != 1: logging.warning("ignoring varying frame durations for mp4 export") with tempfile.TemporaryDirectory(prefix="drawbot-skia-") as tempDir: tempDir = pathlib.Path(tempDir) imagePath = tempDir / "frame.png" _savePixelImages( self._pictures, imagePath, skia.kPNG, whiteBackground=True, singlePage=False, ) imagesTemplate = tempDir / "frame_%d.png" generateMP4(imagesTemplate, path, frameRate, codec=codec) def _savePixelImages(pictures, path, format, whiteBackground=False, singlePage=None): for picture, framePath in _iteratePictures(pictures, path, singlePage): _savePixelImage(picture, framePath, format, whiteBackground=whiteBackground) def _iteratePictures(pictures, path, singlePage=None): if singlePage is None: singlePage = len(pictures) == 1 for index, picture in enumerate(pictures): if singlePage: framePath = path else: framePath = path.parent / f"{path.stem}_{index}{path.suffix}" yield picture, framePath def _savePixelImage(picture, path, format, whiteBackground=False): x, y, width, height = picture.cullRect() assert x == 0 and y == 0 surface = skia.Surface(int(width), int(height)) with surface as canvas: if whiteBackground: canvas.clear(skia.ColorWHITE) canvas.drawPicture(picture) image = surface.makeImageSnapshot() image.save(os.fspath(path), format) class PixelDocument(Document): ... class MP4Document(PixelDocument): ... class PDFDocument(Document): ... class SVGDocument(Document): ... ``` #### File: src/drawbot_skia/path.py ```python import logging import math import skia from fontTools.misc.transform import Transform from fontTools.pens.basePen import BasePen from fontTools.pens.pointPen import PointToSegmentPen, SegmentToPointPen from .gstate import TextStyle # TODO: # - textBox # MAYBE: # - contours # - expandStroke # - intersectionPoints # - offCurvePoints # - onCurvePoints # - optimizePath # - points # - svgClass # - svgID # - svgLink # - traceImage class BezierPath(BasePen): def __init__(self, path=None, glyphSet=None): super().__init__(glyphSet) if path is None: path = skia.Path() self.path = path def _moveTo(self, pt): self.path.moveTo(*pt) def _lineTo(self, pt): self.path.lineTo(*pt) def _curveToOne(self, pt1, pt2, pt3): x1, y1 = pt1 x2, y2 = pt2 x3, y3 = pt3 self.path.cubicTo(x1, y1, x2, y2, x3, y3) def _qCurveToOne(self, pt1, pt2): x1, y1 = pt1 x2, y2 = pt2 self.path.quadTo(x1, y1, x2, y2) def _closePath(self): self.path.close() def beginPath(self, identifier=None): self._pointToSegmentPen = PointToSegmentPen(self) self._pointToSegmentPen.beginPath() def addPoint(self, point, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): if not hasattr(self, "_pointToSegmentPen"): raise AttributeError("path.beginPath() must be called before the path can be used as a point pen") self._pointToSegmentPen.addPoint( point, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier, **kwargs ) def endPath(self): if hasattr(self, "_pointToSegmentPen"): # We are drawing as a point pen pointToSegmentPen = self._pointToSegmentPen del self._pointToSegmentPen pointToSegmentPen.endPath() def arc(self, center, radius, startAngle, endAngle, clockwise): cx, cy = center diameter = radius * 2 rect = (cx - radius, cy - radius, diameter, diameter) sweepAngle = (endAngle - startAngle) % 360 if clockwise: sweepAngle -= 360 self.path.arcTo(rect, startAngle, sweepAngle, False) def arcTo(self, point1, point2, radius): self.path.arcTo(point1, point2, radius) def rect(self, x, y, w, 
h): self.path.addRect((x, y, w, h)) def oval(self, x, y, w, h): self.path.addOval((x, y, w, h)) def line(self, pt1, pt2): points = [(x, y) for x, y in [pt1, pt2]] self.path.addPoly(points, False) def polygon(self, firstPoint, *points, close=True): points = [(x, y) for x, y in (firstPoint,) + points] self.path.addPoly(points, close) def pointInside(self, point): x, y = point return self.path.contains(x, y) def bounds(self): if self.path.countVerbs() == 0: return None return tuple(self.path.computeTightBounds()) def controlPointBounds(self): if self.path.countVerbs() == 0: return None return tuple(self.path.getBounds()) def reverse(self): path = skia.Path() path.reverseAddPath(self.path) self.path = path def appendPath(self, other): self.path.addPath(other.path) def copy(self): path = skia.Path(self.path) return BezierPath(path=path) def translate(self, x, y): self.path.offset(x, y) def scale(self, x, y=None, center=(0, 0)): if y is None: y = x self.transform((x, 0, 0, y, 0, 0), center=center) def rotate(self, angle, center=(0, 0)): t = Transform() t = t.rotate(math.radians(angle)) self.transform(t, center=center) def skew(self, x, y=0, center=(0, 0)): t = Transform() t = t.skew(math.radians(x), math.radians(y)) self.transform(t, center=center) def transform(self, transform, center=(0, 0)): cx, cy = center t = Transform() t = t.translate(cx, cy) t = t.transform(transform) t = t.translate(-cx, -cy) matrix = skia.Matrix() matrix.setAffine(t) self.path.transform(matrix) def drawToPen(self, pen): it = skia.Path.Iter(self.path, False) needEndPath = False for verb, points in it: penVerb, startIndex, numPoints = _pathVerbsToPenMethod.get(verb, (None, None, None)) if penVerb is None: continue assert len(points) == numPoints, (verb, numPoints, len(points)) if penVerb == "conicTo": # We should only call _convertConicToCubicDirty() # if it.conicWeight() == sqrt(2)/2, but skia-python doesn't # give the correct value. # https://github.com/kyamagu/skia-python/issues/116 # if abs(it.conicWeight() - 0.707...) > 1e-10: # logging.warning("unsupported conic form (weight != sqrt(2)/2): conic to cubic conversion will be bad") # TODO: we should fall back to skia.Path.ConvertConicToQuads(), # but that call is currently also not working. 
pen.curveTo(*_convertConicToCubicDirty(*points)) elif penVerb == "closePath": needEndPath = False pen.closePath() else: if penVerb == "moveTo": if needEndPath: pen.endPath() needEndPath = True pointArgs = ((x, y) for x, y in points[startIndex:]) getattr(pen, penVerb)(*pointArgs) if needEndPath: pen.endPath() def drawToPointPen(self, pen): self.drawToPen(SegmentToPointPen(pen)) def text(self, txt, offset=None, font=None, fontSize=10, align=None): if not txt: return textStyle = TextStyle(font=font, fontSize=fontSize) glyphsInfo = textStyle.shape(txt) textStyle.alignGlyphPositions(glyphsInfo, align) gids = sorted(set(glyphsInfo.gids)) paths = [textStyle.skFont.getPath(gid) for gid in gids] for path in paths: path.transform(FLIP_MATRIX) paths = dict(zip(gids, paths)) x, y = (0, 0) if offset is None else offset for gid, pos in zip(glyphsInfo.gids, glyphsInfo.positions): path = paths[gid] self.path.addPath(path, pos[0] + x, pos[1] + y) def _doPathOp(self, other, operator): from pathops import Path, op path1 = Path() path2 = Path() self.drawToPen(path1.getPen()) other.drawToPen(path2.getPen()) result = op( path1, path2, operator, fix_winding=True, keep_starting_points=True, ) resultPath = BezierPath() result.draw(resultPath) return resultPath def union(self, other): from pathops import PathOp return self._doPathOp(other, PathOp.UNION) def intersection(self, other): from pathops import PathOp return self._doPathOp(other, PathOp.INTERSECTION) def difference(self, other): from pathops import PathOp return self._doPathOp(other, PathOp.DIFFERENCE) def xor(self, other): from pathops import PathOp return self._doPathOp(other, PathOp.XOR) def removeOverlap(self): from pathops import Path path = Path() self.drawToPen(path.getPen()) path.simplify( fix_winding=True, keep_starting_points=False, ) resultPath = BezierPath() path.draw(resultPath) self.path = resultPath.path __mod__ = difference def __imod__(self, other): result = self.difference(other) self.path = result.path return self __or__ = union def __ior__(self, other): result = self.union(other) self.path = result.path return self __and__ = intersection def __iand__(self, other): result = self.intersection(other) self.path = result.path return self __xor__ = xor def __ixor__(self, other): result = self.xor(other) self.path = result.path return self FLIP_MATRIX = skia.Matrix() FLIP_MATRIX.setAffine((1, 0, 0, -1, 0, 0)) def _convertConicToCubicDirty(pt1, pt2, pt3): # # NOTE: we do a crude conversion from a conic segment to a cubic bezier, # for two common cases, based on the following assumptions: # - drawbot itself does not allow conics to be drawn # - skia draws conics implicitly for oval(), arc() and arcTo() # - for oval the conic segments span 90 degrees # - for arc and arcTo the conic segments do not span more than 90 degrees # - for arc and arcTo the conic segments are circular, never elliptical # For all these cases, the conic weight will be (close to) zero. 
# # This no longer holds once a path has been transformed with skew or x/y # scale, in which case we need to fall back to # skia.Path.ConvertConicToQuads(), but that is blocked by # https://github.com/kyamagu/skia-python/issues/115 # https://github.com/justvanrossum/drawbot-skia/issues/7 # (x1, y1), (x2, y2), (x3, y3) = pt1, pt2, pt3 dx1 = x2 - x1 dy1 = y2 - y1 dx2 = x2 - x3 dy2 = y2 - y3 angle1 = math.atan2(dy1, dx1) angle2 = math.atan2(-dy2, -dx2) angleDiff = (angle1 - angle2) % (2 * math.pi) if angleDiff > math.pi: angleDiff = 2 * math.pi - angleDiff if abs(angleDiff - math.pi / 2) < 0.0001: # angle is close enough to 90 degrees, we use stupid old BEZIER_ARC_MAGIC handleRatio = 0.5522847498 else: # Fall back to the circular assumption: |pt1 pt2| == |pt2 pt3| d1 = math.hypot(dx1, dy1) d2 = math.hypot(dx2, dy2) if abs(d1 - d2) > 0.00001: logging.warning("unsupported conic form (non-circular, non-90-degrees): conic to cubic conversion will be bad") # TODO: we should fall back to skia.Path.ConvertConicToQuads(), # but that call is currently not working. angleHalf = angleDiff / 2 radius = d1 / math.tan(angleHalf) D = radius * (1 - math.cos(angleHalf)) handleLength = (4 * D / 3) / math.sin(angleHalf) # length of the bcp line handleRatio = handleLength / d1 return ( (x1 + dx1 * handleRatio, y1 + dy1 * handleRatio), (x3 + dx2 * handleRatio, y3 + dy2 * handleRatio), (x3, y3), ) _pathVerbsToPenMethod = { skia.Path.Verb.kMove_Verb: ("moveTo", 0, 1), skia.Path.Verb.kLine_Verb: ("lineTo", 1, 2), skia.Path.Verb.kCubic_Verb: ("curveTo", 1, 4), skia.Path.Verb.kQuad_Verb: ("qCurveTo", 1, 3), skia.Path.Verb.kConic_Verb: ("conicTo", 1, 3), skia.Path.Verb.kClose_Verb: ("closePath", 1, 1), # skia.Path.Verb.kDone_Verb: (None, None), # "StopIteration", not receiving when using Python iterator } ``` #### File: drawbot-skia/tests/test_path.py ```python from drawbot_skia.path import BezierPath def test_path_bounds(): path = BezierPath() assert path.bounds() is None path.rect(10, 20, 30, 40) assert path.bounds() == (10, 20, 40, 60) def test_path_controlPointBounds(): path = BezierPath() assert path.controlPointBounds() is None path.moveTo((0, 0)) path.curveTo((50, 100), (100, 100), (150, 0)) assert path.bounds() == (0.0, 0.0, 150.0, 75.0) assert path.controlPointBounds() == (0.0, 0.0, 150.0, 100.0) def test_path_copy(): path1 = BezierPath() path1.rect(0, 0, 100, 100) path2 = path1.copy() path1.translate(50, 20) assert path1.bounds() == (50.0, 20.0, 150.0, 120.0) assert path2.bounds() == (0.0, 0.0, 100.0, 100.0) def test_path_point_args(): path1 = BezierPath() path1.moveTo([0, 0]) path1.lineTo([0, 100]) path1.curveTo([50, 100], [100, 100], [200, 0]) def test_path_line_args(): path1 = BezierPath() path1.line([0, 0], [0, 100]) ```
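`BezierPath` above wraps a `skia.Path` behind the fontTools segment/point pen interfaces, with the boolean operators delegated to the pathops module (the skia-pathops package). A short usage sketch based only on the methods defined in this file, assuming both skia-python and skia-pathops are installed:

```python
from drawbot_skia.path import BezierPath

# Two overlapping shapes built with the convenience constructors.
square = BezierPath()
square.rect(0, 0, 100, 100)

circle = BezierPath()
circle.oval(50, 50, 100, 100)  # ellipse inscribed in (50, 50, 100, 100)

merged = square.union(circle)        # pathops boolean; returns a new path
punched = square.difference(circle)  # equivalent operator form: square % circle

print(square.bounds())               # (0, 0, 100, 100)
print(merged.pointInside((120, 120)))   # True: inside the added circle
print(punched.pointInside((120, 120)))  # False: outside the remaining shape

# A path is itself a pen, so its segments can be replayed into another object.
copy = BezierPath()
merged.drawToPen(copy)
```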
{ "source": "jenskutilek/glyphsLib", "score": 2 }
#### File: glyphsLib/tests/conftest.py ```python import py import pytest @pytest.fixture def datadir(request): return py.path.local(py.path.local(__file__).dirname).join("data") @pytest.fixture(scope="session", params=["defcon", "ufoLib2"]) def ufo_module(request): return pytest.importorskip(request.param) # Provide a --run-regression-tests CLI option to run slow regression tests separately. def pytest_addoption(parser): parser.addoption( "--run-regression-tests", action="store_true", help="Run (slow) regression tests", ) def pytest_configure(config): config.addinivalue_line( "markers", "regression_test: mark test as a (slow) regression test" ) def pytest_collection_modifyitems(config, items): if config.getoption("--run-regression-tests"): # --run-regression-tests given in cli: do not skip slow tests return skip_regression_test = pytest.mark.skip( reason="need --run-regression-tests option to run" ) for item in items: if "regression_test" in item.keywords: item.add_marker(skip_regression_test) ``` #### File: glyphsLib/tests/eraseOpenCorners_test.py ```python import pytest from defcon import Font from glyphsLib.filters.eraseOpenCorners import EraseOpenCornersFilter @pytest.fixture( params=[ { "glyphs": [ {"name": "space", "width": 500}, { "name": "hasCornerGlyph", "width": 600, "outline": [ ("moveTo", ((20, 0),)), ("lineTo", ((179, 0),)), ("lineTo", ((60, 353),)), ("lineTo", ((198, 360),)), ("closePath", ()), ], }, { "name": "curvyCornerGlyph", "width": 600, "outline": [ ("moveTo", ((400, 0),)), ("curveTo", ((400, 100), (450, 300), (300, 300))), ("lineTo", ((200, 100),)), ("curveTo", ((250, 100), (450, 150), (450, 50))), ("closePath", ()), ], }, { "name": "doubleCornerGlyph", "width": 600, "outline": [ ("moveTo", ((100, 0),)), ("lineTo", ((400, 0),)), ("lineTo", ((400, 500),)), ("lineTo", ((500, 400),)), ("lineTo", ((0, 400),)), ("lineTo", ((100, 500),)), ("closePath", ()), ], }, { "name": "doubleCornerGlyphTrickyBitInMiddle", "width": 600, "outline": [ ("moveTo", ((100, 500),)), ("lineTo", ((100, 0),)), ("lineTo", ((400, 0),)), ("lineTo", ((400, 500),)), ("lineTo", ((500, 400),)), ("lineTo", ((0, 400),)), ("closePath", ()), ], }, { "name": "curveCorner", "width": 600, "outline": [ ("moveTo", ((316, 437),)), ( "curveTo", ( (388.67761, 437.0), (446.1305580343, 401.4757887467), (475, 344), ), ), ("lineTo", ((588, 407),)), ("lineTo", ((567, 260),)), ("curveTo", ((567, 414), (464, 510), (316, 510))), ("closePath", ()), ], }, { "name": "curveCornerPlusSquare", "width": 600, "outline": [ ("moveTo", ((316, 437),)), ( "curveTo", ( (388.67761, 437.0), (446.1305580343, 401.4757887467), (475, 344), ), ), ("lineTo", ((588, 407),)), ("lineTo", ((567, 260),)), ("curveTo", ((567, 414), (464, 510), (316, 510))), ("closePath", ()), ("moveTo", ((100, 100),)), ("lineTo", ((100, 200),)), ("lineTo", ((200, 200),)), ("lineTo", ((200, 100),)), ("closePath", ()), ], }, { "name": "squarePlusCurveCorner", "width": 600, "outline": [ ("moveTo", ((100, 100),)), ("lineTo", ((100, 200),)), ("lineTo", ((200, 200),)), ("lineTo", ((200, 100),)), ("closePath", ()), ("moveTo", ((316, 437),)), ( "curveTo", ( (388.67761, 437.0), (446.1305580343, 401.4757887467), (475, 344), ), ), ("lineTo", ((588, 407),)), ("lineTo", ((567, 260),)), ("curveTo", ((567, 414), (464, 510), (316, 510))), ("closePath", ()), ], }, ] } ] ) def font(request): font = Font() for param in request.param["glyphs"]: glyph = font.newGlyph(param["name"]) glyph.width = param.get("width", 0) pen = glyph.getPen() for operator, operands in param.get("outline", []): 
getattr(pen, operator)(*operands) glyph = font.newGlyph(param["name"] + ".reversed") glyph.width = param.get("width", 0) pen = glyph.getPen() for operator, operands in param.get("outline", []): getattr(pen, operator)(*operands) for c in glyph: c.reverse() return font def test_empty_glyph(font): glyph = "space" for g in [glyph, glyph + ".reversed"]: philter = EraseOpenCornersFilter(include=g) assert not philter(font) def test_corner_glyph(font): philter = EraseOpenCornersFilter(include={"hasCornerGlyph"}) assert philter(font) newcontour = font["hasCornerGlyph"][0] assert len(newcontour) == 3 assert newcontour[2].x == pytest.approx(114.5417) assert newcontour[2].y == pytest.approx(191.2080) philter = EraseOpenCornersFilter(include={"hasCornerGlyph.reversed"}) assert not philter(font) def test_curve_curve_glyph(font): philter = EraseOpenCornersFilter(include={"curvyCornerGlyph"}) assert philter(font) newcontour = font["curvyCornerGlyph"][0] assert len(newcontour) == 7 assert newcontour[0].x == pytest.approx(406.4859) assert newcontour[0].y == pytest.approx(104.5666) philter = EraseOpenCornersFilter(include={"curvyCornerGlyph.reversed"}) assert not philter(font) def test_double_corner_glyph(font): philter = EraseOpenCornersFilter(include={"doubleCornerGlyph"}) assert philter(font) newcontour = font["doubleCornerGlyph"][0] assert len(newcontour) == 4 assert newcontour[0].x == 100 and newcontour[0].y == 0 assert newcontour[1].x == 400 and newcontour[1].y == 0 assert newcontour[2].x == 400 and newcontour[2].y == 400 assert newcontour[3].x == 100 and newcontour[3].y == 400 philter = EraseOpenCornersFilter(include={"doubleCornerGlyph.reversed"}) assert not philter(font) # In this, the corner point of the contour is at array index 0, so we # need to wrap around the array indexes. 
def test_double_corner_glyph_wrap(font): philter = EraseOpenCornersFilter(include={"doubleCornerGlyphTrickyBitInMiddle"}) assert philter(font) newcontour = font["doubleCornerGlyphTrickyBitInMiddle"][0] assert len(newcontour) == 4 assert newcontour[0].x == 100 and newcontour[0].y == 400 assert newcontour[1].x == 100 and newcontour[1].y == 0 assert newcontour[2].x == 400 and newcontour[2].y == 0 assert newcontour[3].x == 400 and newcontour[3].y == 400 def test_curve_corner(font): oldcontour = font["curveCorner"][0] assert len(oldcontour) == 9 philter = EraseOpenCornersFilter(include={"curveCorner"}) assert philter(font) newcontour = font["curveCorner"][0] assert len(newcontour) == 8 assert newcontour[5].x == pytest.approx(501.81019332487494) assert newcontour[5].y == pytest.approx(462.5782044264) philter = EraseOpenCornersFilter(include={"curveCorner.reversed"}) assert not philter(font) def test_curve_corner_plus_square(font): oldcontour = font["curveCornerPlusSquare"][0] assert len(oldcontour) == 9 philter = EraseOpenCornersFilter(include={"curveCornerPlusSquare"}) assert philter(font) assert len(font["curveCornerPlusSquare"][1]) == 4 newcontour = font["curveCornerPlusSquare"][0] assert len(newcontour) == 8 assert newcontour[5].x == pytest.approx(501.81019332487494) assert newcontour[5].y == pytest.approx(462.5782044264) philter = EraseOpenCornersFilter(include={"curveCornerPlusSquare.reversed"}) assert not philter(font) def test_square_plus_curve_corner(font): oldcontour = font["squarePlusCurveCorner"][1] assert len(oldcontour) == 9 philter = EraseOpenCornersFilter(include={"squarePlusCurveCorner"}) assert philter(font) assert len(font["squarePlusCurveCorner"][0]) == 4 newcontour = font["squarePlusCurveCorner"][1] assert len(newcontour) == 8 assert newcontour[5].x == pytest.approx(501.81019332487494) assert newcontour[5].y == pytest.approx(462.5782044264) philter = EraseOpenCornersFilter(include={"squarePlusCurveCorner.reversed"}) assert not philter(font) ``` #### File: glyphsLib/tests/tokens_test.py ```python import os import pytest from glyphsLib.classes import GSFont from glyphsLib.builder.tokens import TokenExpander from glyphsLib.builder import to_ufos TESTFONT = GSFont( os.path.join(os.path.dirname(__file__), os.path.join("data", "TokenTest.glyphs")) ) master = TESTFONT.masters[1] expander = TokenExpander(TESTFONT, master) @pytest.mark.parametrize( "test_input,expected,throws", [ ("sub a by b;", "sub a by b;", False), ("pos a $padding b;", "pos a 250 b;", False), (r"pos a ${padding} b;", "pos a 250 b;", False), (r"pos a ${padding * 2} b;", "pos a 500 b;", False), (r"pos a ${padding + padding} b;", "pos a 500 b;", False), (r"pos a ${padding + (padding/2)} b;", "pos a 375 b;", False), ("pos a $xxx b;", "", True), # Tests from Glyphs tutorial ( "$[name endswith '.sc']", "A.sc", False, ), # will expand to all glyph names that end in ".sc" ("$[not name endswith '.sc']", "A Sacute", False), ("$[name endswith '.sc' or not name endswith '.sc']", "A.sc A Sacute", False), ("$[name endswith '.sc' and not name endswith '.sc']", "", False), # ('$[layer0.width < 500]', "", False), # layer0 = first master # ('$[layers.count > 1]', "", False), # compare numbers with: == != <= >= < > # ('$[direction == 2]', "", False), # 0=LTR, 1=BiDi, 2=RTL # ('$[colorIndex == 5]', "", False), # ('$[case == smallCaps]', "", False), # predefined constants: noCase, upper, lower, smallCaps, minor, other ( '$[name matches "S|s.*"]', "A.sc Sacute", False, ), # "matches": regular expression # ('$[leftMetricsKey like 
"*"]', "", False), # "like": wildcard search # ('$[name like "*e*"]', "", False), # e anywhere in the glyph name ('$[script like "latin"]', "A", False), ('$[category like "Separator"]', "Sacute", False), ('$[leftKerningGroup like "H"]', "A", False), ('$[rightKerningGroup like "L"]', "A", False), ('$[unicode beginswith "41"]', "A", False), # beginswith, endswith, contains ('$[note contains "love it"]', "A.sc", False), # glyph note # ('$[countOfUnicodes > 1]', "", False), # ('$[countOfLayers > 1]', "", False), ('$[subCategory like "Arrow"]', "Sacute", False), # ('$[hasHints == 0]', "", False), # boolean: false, no, 0 versus true, yes, 1 # ('$[isColorGlyph == true]', "", False), ( '$[script == "latin"]', "A", False, ), # connect multiple conditions with ORor AND # ('$[hasComponents == true and script == "latin"]', "", False), # connect multiple conditions with ORor AND # ('$[hasTrueTypeHints == false]', "", False), # ('$[hasAlignedWidth == true]', "", False), # ('$[hasPostScriptHints == true]', "", False), # ('$[hasAnnotations == true]', "", False), # ('$[hasCorners == true]', "", False), # corners = corner components # ('$[hasSpecialLayers == yes]', "", False), # special layers = color, brace and bracket layers # ('$[isHangulKeyGlyph == no]', "", False), ], ) def test_token_expander(test_input, expected, throws): if throws: with pytest.raises(ValueError): expander.expand(test_input) else: output = expander.expand(test_input) assert output == expected def test_end_to_end(): ufos = to_ufos(TESTFONT) assert "@SmallCaps = [ A.sc" in ufos[0].features.text assert "pos A A.sc 100" in ufos[0].features.text assert "pos A A.sc 500" in ufos[1].features.text ``` #### File: glyphsLib/tests/writer_test.py ```python import unittest from io import StringIO from textwrap import dedent from collections import OrderedDict import os from glyphsLib import classes from glyphsLib.types import parse_datetime, Point, Rect from glyphsLib.writer import dump, dumps from . import test_helpers class WriterTest(unittest.TestCase, test_helpers.AssertLinesEqual): def assertWrites(self, glyphs_object, text, format_version=2): """Assert that the given object, when given to the writer, produces the given text. 
""" expected = text.splitlines() actual = test_helpers.write_to_lines(glyphs_object, format_version) self.assertLinesEqual( expected, actual, "The writer has not produced the expected output" ) def assertWritesValue(self, glyphs_value, text, format_version=2): """Assert that the writer produces the given text for the given value.""" expected = ( dedent( """\ {{ writtenValue = {0}; }} """ ) .format(text) .splitlines() ) # We wrap the value in a dict to use the same test helper actual = test_helpers.write_to_lines( {"writtenValue": glyphs_value}, format_version ) self.assertLinesEqual( expected, actual, "The writer has not produced the expected output" ) def test_write_font_attributes(self): """Test the writer on all GSFont attributes""" font = classes.GSFont() # List of properties from https://docu.glyphsapp.com/#gsfont # parent: not handled because it's internal and read-only # masters m1 = classes.GSFontMaster() m1.id = "M1" font.masters.insert(0, m1) m2 = classes.GSFontMaster() m2.id = "M2" font.masters.insert(1, m2) # instances i1 = classes.GSInstance() i1.name = "MuchBold" font.instances.append(i1) # glyphs g1 = classes.GSGlyph() g1.name = "G1" font.glyphs.append(g1) # classes c1 = classes.GSClass() c1.name = "C1" font.classes.append(c1) # features f1 = classes.GSFeature() f1.name = "F1" font.features.append(f1) # featurePrefixes fp1 = classes.GSFeaturePrefix() fp1.name = "FP1" font.featurePrefixes.append(fp1) # copyright font.copyright = "Copyright Bob" # designer font.designer = "Bob" # designerURL font.designerURL = "bob.me" # manufacturer font.manufacturer = "Manu" # manufacturerURL font.manufacturerURL = "manu.com" # versionMajor font.versionMajor = 2 # versionMinor font.versionMinor = 104 # date font.date = parse_datetime("2017-10-03 07:35:46 +0000") # familyName font.familyName = "Sans Rien" # upm font.upm = 2000 # note font.note = "Was bored, made this" # kerning font.kerning = OrderedDict( [("M1", OrderedDict([("@MMK_L_G1", OrderedDict([("@MMK_R_G1", 0.1)]))]))] ) # userData font.userData = { "a": "test", "b": [1, {"c": 2}], "d": [1, "1"], "noodleThickness": "106.0", } # grid -> gridLength font.grid = 35 # gridSubDivisions font.gridSubDivisions = 5 # keyboardIncrement font.keyboardIncrement = 1.2 # disablesNiceNames font.disablesNiceNames = True # customParameters font.customParameters["ascender"] = 300 # selection: not written # selectedLayers: not written # selectedFontMaster: not written # masterIndex: not written # currentText: not written # tabs: not written # currentTab: not written # filepath: not written # tool: not written # tools: not handled because it is a read-only list of GUI features # .appVersion (extra property that is not in the docs!) 
font.appVersion = "895" self.assertWrites( font, dedent( """\ { .appVersion = "895"; classes = ( { code = ""; name = C1; } ); copyright = "Copyright Bob"; customParameters = ( { name = note; value = "Was bored, made this"; }, { name = ascender; value = 300; } ); date = "2017-10-03 07:35:46 +0000"; designer = Bob; designerURL = bob.me; disablesNiceNames = 1; familyName = "Sans Rien"; featurePrefixes = ( { code = ""; name = FP1; } ); features = ( { code = ""; name = F1; } ); fontMaster = ( { ascender = 800; capHeight = 700; descender = -200; id = M1; xHeight = 500; }, { ascender = 800; capHeight = 700; descender = -200; id = M2; xHeight = 500; } ); glyphs = ( { glyphname = G1; } ); gridLength = 35; gridSubDivision = 5; instances = ( { name = MuchBold; } ); kerning = { M1 = { "@MMK_L_G1" = { "@MMK_R_G1" = 0.1; }; }; }; keyboardIncrement = 1.2; manufacturer = Manu; manufacturerURL = manu.com; unitsPerEm = 2000; userData = { a = test; b = ( 1, { c = 2; } ); d = ( 1, "1" ); noodleThickness = "106.0"; }; versionMajor = 2; versionMinor = 104; } """ ), ) # Don't write the keyboardIncrement if it's 1 (default) font.keyboardIncrement = 1 written = test_helpers.write_to_lines(font) self.assertFalse(any("keyboardIncrement" in line for line in written)) # Always write versionMajor and versionMinor, even when 0 font.versionMajor = 0 font.versionMinor = 0 written = test_helpers.write_to_lines(font) self.assertIn("versionMajor = 0;", written) self.assertIn("versionMinor = 0;", written) def test_write_font_master_attributes(self): """Test the writer on all GSFontMaster attributes""" master = classes.GSFontMaster() # List of properties from https://docu.glyphsapp.com/#gsfontmaster # id master.id = "MASTER-ID" # name master._name = "Name Hairline Megawide" master.customParameters["Master Name"] = "Param Hairline Megawide" # weight master.weight = "Thin" # width master.width = "Wide" # weightValue master.weightValue = 0.01 # widthValue master.widthValue = 0.99 # customValue # customName master.customName = "Overextended" # A value of 0.0 is not written to the file. 
master.customValue = 0.001 master.customValue1 = 0.1 master.customValue2 = 0.2 master.customValue3 = 0.3 # ascender master.ascender = 234.5 # capHeight master.capHeight = 200.6 # xHeight master.xHeight = 59.1 # descender master.descender = -89.2 # italicAngle master.italicAngle = 12.2 # verticalStems master.verticalStems = [1, 2, 3] # horizontalStems master.horizontalStems = [4, 5, 6] # alignmentZones zone = classes.GSAlignmentZone(0, -30) master.alignmentZones = [zone] # blueValues: not handled because it is read-only # otherBlues: not handled because it is read-only # guides guide = classes.GSGuide() guide.name = "middle" master.guides.append(guide) # userData master.userData["rememberToMakeTea"] = True # customParameters master.customParameters["underlinePosition"] = -135 self.assertWrites( master, dedent( """\ { alignmentZones = ( "{0, -30}" ); ascender = 234.5; capHeight = 200.6; custom = Overextended; customValue = 0.001; customValue1 = 0.1; customValue2 = 0.2; customValue3 = 0.3; customParameters = ( { name = "<NAME>"; value = "Param Hairline Megawide"; }, { name = underlinePosition; value = -135; } ); descender = -89.2; guideLines = ( { name = middle; } ); horizontalStems = ( 4, 5, 6 ); id = "MASTER-ID"; italicAngle = 12.2; name = "<NAME>airline Megawide"; userData = { rememberToMakeTea = 1; }; verticalStems = ( 1, 2, 3 ); weight = Thin; weightValue = 0.01; width = Wide; widthValue = 0.99; xHeight = 59.1; } """ ), ) # Write the capHeight and xHeight even if they are "0" master.xHeight = 0 master.capHeight = 0 written = test_helpers.write_to_lines(master) self.assertIn("xHeight = 0;", written) self.assertIn("capHeight = 0;", written) def test_write_alignment_zone(self): zone = classes.GSAlignmentZone(23, 40) self.assertWritesValue(zone, '"{23, 40}"') def test_write_instance(self): instance = classes.GSInstance() # List of properties from https://docu.glyphsapp.com/#gsinstance # active instance.active = True # name instance.name = "SemiBoldCompressed (name)" # weight instance.weight = "SemiBold (weight)" # width instance.width = "Compressed (width)" # weightValue instance.weightValue = 600 # widthValue instance.widthValue = 200 # customValue instance.customValue = 0.4 # isItalic instance.isItalic = True # isBold instance.isBold = True # linkStyle instance.linkStyle = "linked style value" # familyName instance.familyName = "Sans Rien (familyName)" # preferredFamily instance.preferredFamily = "Sans Rien (preferredFamily)" # preferredSubfamilyName instance.preferredSubfamilyName = ( "Semi Bold Compressed (preferredSubFamilyName)" ) # windowsFamily instance.windowsFamily = "Sans Rien MS (windowsFamily)" # windowsStyle: read only # windowsLinkedToStyle: read only # fontName instance.fontName = "SansRien (fontName)" # fullName instance.fullName = "Sans Rien Semi Bold Compressed (fullName)" # customParameters instance.customParameters["hheaLineGap"] = 10 # instanceInterpolations instance.instanceInterpolations = {"M1": 0.2, "M2": 0.8} # manualInterpolation instance.manualInterpolation = True # interpolatedFont: read only self.assertWrites( instance, dedent( """\ { customParameters = ( { name = familyName; value = "Sans Rien (familyName)"; }, { name = preferredFamily; value = "Sans Rien (preferredFamily)"; }, { name = preferredSubfamilyName; value = "Semi Bold Compressed (preferredSubFamilyName)"; }, { name = styleMapFamilyName; value = "Sans Rien MS (windowsFamily)"; }, { name = postscriptFontName; value = "SansRien (fontName)"; }, { name = postscriptFullName; value = "Sans Rien Semi Bold 
Compressed (fullName)"; }, { name = hheaLineGap; value = 10; } ); interpolationCustom = 0.4; interpolationWeight = 600; interpolationWidth = 200; instanceInterpolations = { M1 = 0.2; M2 = 0.8; }; isBold = 1; isItalic = 1; linkStyle = "linked style value"; manualInterpolation = 1; name = "SemiBoldCompressed (name)"; weightClass = "SemiBold (weight)"; widthClass = "Compressed (width)"; } """ ), ) def test_write_custom_parameter(self): # Name without quotes self.assertWritesValue( classes.GSCustomParameter("myParam", "myValue"), "{\nname = myParam;\nvalue = myValue;\n}", ) # Name with quotes self.assertWritesValue( classes.GSCustomParameter("my param", "myValue"), '{\nname = "my param";\nvalue = myValue;\n}', ) # Value with quotes self.assertWritesValue( classes.GSCustomParameter("myParam", "my value"), '{\nname = myParam;\nvalue = "my value";\n}', ) # Int param (ascender): should convert the value to string self.assertWritesValue( classes.GSCustomParameter("ascender", 12), "{\nname = ascender;\nvalue = 12;\n}", ) # Float param (postscriptBlueScale): should convert the value to string self.assertWritesValue( classes.GSCustomParameter("postscriptBlueScale", 0.125), "{\nname = postscriptBlueScale;\nvalue = 0.125;\n}", ) # Bool param (isFixedPitch): should convert the boolean value to 0/1 self.assertWritesValue( classes.GSCustomParameter("isFixedPitch", True), "{\nname = isFixedPitch;\nvalue = 1;\n}", ) # Intlist param: should map list of int to list of strings self.assertWritesValue( classes.GSCustomParameter("fsType", [1, 2]), "{\nname = fsType;\nvalue = (\n1,\n2\n);\n}", ) def test_write_class(self): class_ = classes.GSClass() class_.name = "e" class_.code = "e eacute egrave" class_.automatic = True self.assertWrites( class_, dedent( """\ { automatic = 1; code = "e eacute egrave"; name = e; } """ ), ) # When the code is an empty string, write an empty string class_.code = "" self.assertWrites( class_, dedent( """\ { automatic = 1; code = ""; name = e; } """ ), ) def test_write_feature_prefix(self): fp = classes.GSFeaturePrefix() fp.name = "Languagesystems" fp.code = "languagesystem DFLT dflt;" fp.automatic = True self.assertWrites( fp, dedent( """\ { automatic = 1; code = "languagesystem DFLT dflt;"; name = Languagesystems; } """ ), ) def test_write_feature(self): feature = classes.GSFeature() feature.name = "sups" feature.code = " sub @standard by @sups;" feature.automatic = True feature.notes = "notes about sups" self.assertWrites( feature, dedent( """\ { automatic = 1; code = " sub @standard by @sups;"; name = sups; notes = "notes about sups"; } """ ), ) def test_write_glyph(self): glyph = classes.GSGlyph() # https://docu.glyphsapp.com/#gsglyph # parent: not written # layers # Put the glyph in a font with at least one master for the magic in # `glyph.layers.append()` to work. 
font = classes.GSFont() master = classes.GSFontMaster() master.id = "MASTER-ID" font.masters.insert(0, master) font.glyphs.append(glyph) layer = classes.GSLayer() layer.layerId = "LAYER-ID" layer.name = "L1" glyph.layers.insert(0, layer) # name glyph.name = "Aacute" # unicode glyph.unicode = "00C1" # string: not written # id: not written # category glyph.category = "Letter" # subCategory glyph.subCategory = "Uppercase" # script glyph.script = "latin" # productionName glyph.productionName = "Aacute.prod" # glyphInfo: not written # leftKerningGroup glyph.leftKerningGroup = "A" # rightKerningGroup glyph.rightKerningGroup = "A" # leftKerningKey: not written # rightKerningKey: not written # leftMetricsKey glyph.leftMetricsKey = "A" # rightMetricsKey glyph.rightMetricsKey = "A" # widthMetricsKey glyph.widthMetricsKey = "A" # export glyph.export = False # color glyph.color = 11 # colorObject: not written # note glyph.note = "Stunning one-bedroom A with renovated acute accent" # selected: not written # mastersCompatible: not stored # userData glyph.userData["rememberToMakeCoffe"] = True # Check that empty collections are written glyph.userData["com.someoneelse.coolsoftware.customdata"] = [ OrderedDict( [("zero", 0), ("emptyList", []), ("emptyDict", {}), ("emptyString", "")] ), [], {}, "", "hey", 0, 1, ] # smartComponentAxes axis = classes.GSSmartComponentAxis() axis.name = "crotchDepth" glyph.smartComponentAxes.append(axis) # lastChange glyph.lastChange = parse_datetime("2017-10-03 07:35:46 +0000") self.assertWrites( glyph, dedent( """\ { color = 11; export = 0; glyphname = Aacute; lastChange = "2017-10-03 07:35:46 +0000"; layers = ( { associatedMasterId = "MASTER-ID"; layerId = "LAYER-ID"; name = L1; width = 600; } ); leftKerningGroup = A; leftMetricsKey = A; widthMetricsKey = A; note = "Stunning one-bedroom A with renovated acute accent"; rightKerningGroup = A; rightMetricsKey = A; unicode = 00C1; script = latin; category = Letter; subCategory = Uppercase; userData = { com.someoneelse.coolsoftware.customdata = ( { zero = 0; emptyList = ( ); emptyDict = { }; emptyString = ""; }, ( ), { }, "", hey, 0, 1 ); rememberToMakeCoffe = 1; }; partsSettings = ( { name = crotchDepth; bottomValue = 0; topValue = 0; } ); } """ ), ) # Write the script even when it's an empty string # Same for category and subCategory glyph.script = "" glyph.category = "" glyph.subCategory = "" written = test_helpers.write_to_lines(glyph) self.assertIn('script = "";', written) self.assertIn('category = "";', written) self.assertIn('subCategory = "";', written) # Write double unicodes glyph.unicodes = ["00C1", "E002"] written = test_helpers.write_to_lines(glyph) self.assertIn('unicode = "00C1,E002";', written) def test_write_layer(self): layer = classes.GSLayer() # http://docu.glyphsapp.com/#gslayer # parent: not written # name layer.name = "{125, 100}" # associatedMasterId layer.associatedMasterId = "M1" # layerId layer.layerId = "L1" # color layer.color = (1, 2, 3, 4) # colorObject: read-only, computed # components component = classes.GSComponent(glyph="glyphName") layer.components.append(component) # guides guide = classes.GSGuide() guide.name = "xheight" layer.guides.append(guide) # annotations annotation = classes.GSAnnotation() annotation.type = classes.TEXT annotation.text = "Fuck, this curve is ugly!" 
layer.annotations.append(annotation) # hints hint = classes.GSHint() hint.name = "hintName" layer.hints.append(hint) # anchors anchor = classes.GSAnchor() anchor.name = "top" layer.anchors["top"] = anchor # paths path = classes.GSPath() layer.paths.append(path) # selection: read-only # LSB, RSB, TSB, BSB: not written # width layer.width = 890.4 # leftMetricsKey layer.leftMetricsKey = "A" # rightMetricsKey layer.rightMetricsKey = "A" # widthMetricsKey layer.widthMetricsKey = "A" # bounds: read-only, computed # selectionBounds: read-only, computed # background # XXX bg is unused? bg = layer.background # noqa: F841 # backgroundImage image = classes.GSBackgroundImage("/path/to/file.jpg") layer.backgroundImage = image # bezierPath: read-only, objective-c # openBezierPath: read-only, objective-c # completeOpenBezierPath: read-only, objective-c # isAligned # FIXME: (jany) is this read-only? # is this computed from each component's alignment? # layer.isAligned = False # userData layer.userData["rememberToMakeCoffe"] = True # smartComponentPoleMapping layer.smartComponentPoleMapping["crotchDepth"] = 2 # Top pole layer.smartComponentPoleMapping["shoulderWidth"] = 1 # Bottom pole self.assertWrites( layer, dedent( """\ { anchors = ( { name = top; position = "{0, 0}"; } ); annotations = ( { text = "Fuck, this curve is ugly!"; type = 1; } ); associatedMasterId = M1; background = { }; backgroundImage = { crop = "{{0, 0}, {0, 0}}"; imagePath = "/path/to/file.jpg"; }; color = (1, 2, 3, 4); components = ( { name = glyphName; } ); guideLines = ( { name = xheight; } ); hints = ( { name = hintName; } ); layerId = L1; leftMetricsKey = A; widthMetricsKey = A; rightMetricsKey = A; name = "{125, 100}"; paths = ( { closed = 1; } ); userData = { PartSelection = { crotchDepth = 2; shoulderWidth = 1; }; rememberToMakeCoffe = 1; }; width = 890.4; } """ ), ) # Don't write a blank layer name layer.name = "" written = test_helpers.write_to_lines(layer) self.assertNotIn('name = "";', written) # Write the width even if 0 layer.width = 0 written = test_helpers.write_to_lines(layer) self.assertIn("width = 0;", written) def test_write_anchor(self): anchor = classes.GSAnchor("top", Point(23, 45.5)) self.assertWrites( anchor, dedent( """\ { name = top; position = "{23, 45.5}"; } """ ), ) # Write a position of 0, 0 anchor = classes.GSAnchor("top", Point(0, 0)) self.assertWrites( anchor, dedent( """\ { name = top; position = "{0, 0}"; } """ ), ) def test_write_component(self): component = classes.GSComponent("dieresis") # http://docu.glyphsapp.com/#gscomponent # position component.position = Point(45.5, 250) # scale component.scale = 2.0 # rotation component.rotation = 90 # componentName: already set at init # component: read-only # layer: read-only # transform: already set using scale & position # bounds: read-only, objective-c # automaticAlignment component.automaticAlignment = True # anchor component.anchor = "top" # selected: not written # smartComponentValues component.smartComponentValues = {"crotchDepth": -77} # bezierPath: read-only, objective-c self.assertWrites( component, dedent( """\ { anchor = top; name = dieresis; piece = { crotchDepth = -77; }; transform = "{0, 2, -2, 0, 45.5, 250}"; } """ ), ) def test_write_smart_component_axis(self): axis = classes.GSSmartComponentAxis() # http://docu.glyphsapp.com/#gssmartcomponentaxis axis.name = "crotchDepth" axis.topName = "High" axis.topValue = 0 axis.bottomName = "Low" axis.bottomValue = -100 self.assertWrites( axis, dedent( """\ { name = crotchDepth; bottomName = Low; 
bottomValue = -100; topName = High; topValue = 0; } """ ), ) def test_write_path(self): path = classes.GSPath() # http://docu.glyphsapp.com/#gspath # parent: not written # nodes node = classes.GSNode() path.nodes.append(node) # segments: computed, objective-c # closed path.closed = True # direction: computed # bounds: computed # selected: not written # bezierPath: computed self.assertWrites( path, dedent( """\ { closed = 1; nodes = ( "0 0 LINE" ); } """ ), ) def test_write_node(self): node = classes.GSNode(Point(10, 30), classes.CURVE) # http://docu.glyphsapp.com/#gsnode # position: already set # #type: already set # smooth node.smooth = True # connection: deprecated # selected: not written # index, nextNode, prevNode: computed # name node.name = "top-left corner" # userData node.userData["rememberToDownloadARealRemindersApp"] = True self.assertWritesValue( node, '"10 30 CURVE SMOOTH {name = \\"top-left corner\\";\\n\ rememberToDownloadARealRemindersApp = 1;}"', ) # Write floating point coordinates node = classes.GSNode(Point(499.99, 512.01), classes.OFFCURVE) self.assertWritesValue(node, '"499.99 512.01 OFFCURVE"') # Write userData with special characters test_user_data = { "\nkey\"';\n\n\n": "\"'value\nseveral lines\n;\n", ";": ";\n", "escapeception": "\\\"\\'\\n\\\\n", } node = classes.GSNode(Point(130, 431), classes.LINE) for key, value in test_user_data.items(): node.userData[key] = value # This is the output of Glyphs 1089 expected_output = ( '"130 431 LINE {\\"\\012key\\\\"\';\\012\\012\\012\\" ' '= \\"\\\\"\'value\\012several lines\\012;\\012\\"' ';\\n\\";\\" = \\";\\012\\";\\n' 'escapeception = \\"\\\\\\\\"\\\\\'\\\\\\n\\\\\\\\\\n\\";}"' ) self.assertWritesValue(node, expected_output) # Check that we can read the userData back node = classes.GSNode().read(expected_output[1:-1]) self.assertEqual(test_user_data, dict(node.userData)) def test_write_guideline(self): line = classes.GSGuide() # http://docu.glyphsapp.com/#GSGuideLine line.position = Point(56, 45) line.angle = 11.0 line.name = "italic angle" # selected: not written self.assertWrites( line, dedent( """\ { angle = 11; name = "italic angle"; position = "{56, 45}"; } """ ), ) def test_write_annotation(self): annotation = classes.GSAnnotation() # http://docu.glyphsapp.com/#gsannotation annotation.position = Point(12, 34) annotation.type = classes.TEXT annotation.text = "Look here" annotation.angle = 123.5 annotation.width = 135 self.assertWrites( annotation, dedent( """\ { angle = 123.5; position = "{12, 34}"; text = "Look here"; type = 1; width = 135; } """ ), ) def test_write_hint(self): hint = classes.GSHint() # http://docu.glyphsapp.com/#gshint layer = classes.GSLayer() path1 = classes.GSPath() layer.paths.append(path1) node1 = classes.GSNode(Point(100, 100)) path1.nodes.append(node1) hint.originNode = node1 node2 = classes.GSNode(Point(200, 200)) path1.nodes.append(node2) hint.targetNode = node2 node3 = classes.GSNode(Point(300, 300)) path1.nodes.append(node3) hint.otherNode1 = node3 path2 = classes.GSPath() layer.paths.append(path2) node4 = classes.GSNode(Point(400, 400)) path2.nodes.append(node4) hint.otherNode2 = node4 hint.type = classes.CORNER hint.options = classes.TTROUND | classes.TRIPLE hint.horizontal = True # selected: not written hint.name = "My favourite hint" self.assertWrites( hint, dedent( """\ { horizontal = 1; origin = "{0, 0}"; target = "{0, 1}"; other1 = "{0, 2}"; other2 = "{1, 0}"; type = 16; name = "My favourite hint"; options = 128; } """ ), ) # FIXME: (jany) What about the undocumented scale & 
stem? # -> Add a test for that # Test with target = "up" # FIXME: (jany) what does target = "up" mean? # Is there an official python API to write that? # hint.targetNode = 'up' # written = test_helpers.write_to_lines(hint) # self.assertIn('target = up;', written) def test_write_hint_v3(self): hint = classes.GSHint() # http://docu.glyphsapp.com/#gshint layer = classes.GSLayer() path1 = classes.GSPath() layer.paths.append(path1) node1 = classes.GSNode(Point(100, 100)) path1.nodes.append(node1) hint.originNode = node1 node2 = classes.GSNode(Point(200, 200)) path1.nodes.append(node2) hint.targetNode = node2 node3 = classes.GSNode(Point(300, 300)) path1.nodes.append(node3) hint.otherNode1 = node3 path2 = classes.GSPath() layer.paths.append(path2) node4 = classes.GSNode(Point(400, 400)) path2.nodes.append(node4) hint.otherNode2 = node4 hint.type = classes.CORNER hint.options = classes.TTROUND | classes.TRIPLE hint.horizontal = True # selected: not written hint.name = "My favourite hint" self.assertWrites( hint, dedent( """\ { horizontal = 1; origin = (0,0); target = (0,1); other1 = (0,2); other2 = (1,0); type = 16; name = "My favourite hint"; options = 128; } """, ), format_version=3, ) # FIXME: (jany) What about the undocumented scale & stem? # -> Add a test for that # Test with target = "up" # FIXME: (jany) what does target = "up" mean? # Is there an official python API to write that? # hint.targetNode = 'up' # written = test_helpers.write_to_lines(hint) # self.assertIn('target = up;', written) def test_write_background_image(self): image = classes.GSBackgroundImage("/tmp/img.jpg") # http://docu.glyphsapp.com/#gsbackgroundimage # path: already set # image: read-only, objective-c image.crop = Rect(Point(0, 10), Point(500, 510)) image.locked = True image.alpha = 70 image.position = Point(40, 90) image.scale = (1.1, 1.2) image.rotation = 0.3 # transform: Already set with scale/rotation self.assertWrites( image, dedent( """\ { alpha = 70; crop = "{{0, 10}, {500, 510}}"; imagePath = "/tmp/img.jpg"; locked = 1; transform = "{1.09998, 0.00576, -0.00628, 1.19998, 40, 90}"; } """ ), ) class WriterDumpInterfaceTest(unittest.TestCase): def test_dump(self): obj = classes.GSFont() fp = StringIO() dump(obj, fp) self.assertTrue(fp.getvalue()) def test_dumps(self): obj = classes.GSFont() string = dumps(obj) self.assertTrue(string) class WriterRoundtripTest(unittest.TestCase, test_helpers.AssertParseWriteRoundtrip): def test_roundtrip_on_file(self): filename = os.path.join( os.path.dirname(__file__), "data/GlyphsUnitTestSans.glyphs" ) self.assertParseWriteRoundtrip(filename) if __name__ == "__main__": unittest.main() ```
{ "source": "jenskutilek/HumbleTypeInstructionCompiler", "score": 3 }
#### File: HumbleTypeInstructionCompiler/htic/block.py ```python from __future__ import absolute_import from .accumulator import Accumulator class Block(object): def __init__(self): self.last = None def add(self, instruction): if self.last: if self.last.canMerge(instruction): self.last.merge(instruction) return else: instruction.chain(self.last) self.last = instruction def write(self, writer): if self.last: accumulator = Accumulator(writer) self.last.write(accumulator) accumulator.flush() ``` #### File: HumbleTypeInstructionCompiler/htic/instruction.py ```python from __future__ import absolute_import from .argument import IntegerArgument from .block import Block from .error import HumbleError class Instruction(object): @staticmethod def newInstruction(name): if name == "ABS": return Instruction(name, 0x64, 0, ('getVAL',), True) elif name == "ADD": return Instruction(name, 0x60, 0, ('getVAL', 'getVAL'), True) elif name == "ALIGNPTS": return Instruction(name, 0x27, 0, ('getVAL', 'getVAL'), False) elif name == "ALIGNRP": return LoopInstruction(name, 0x3C, 0, ('getVAL', 'getVALS'), False) elif name == "AND": return Instruction(name, 0x5A, 0, ('getVAL', 'getVAL'), True) elif name == "CALL": return CallInstruction(name, 0x2B, 0, ('CALL',), True, 1) elif name == "CEILING": return Instruction(name, 0x67, 0, ('getVAL',), True) elif name == "CINDEX": return Instruction(name, 0x25, 0, ('getVAL',), True) elif name == "CLEAR": return Instruction(name, 0x22, 0, (), True) # special case for mightPush elif name == "DEBUG": return Instruction(name, 0x4F, 0, ('getVAL',), False) elif name == "deltac": return DeltaInstruction(name.upper(), None, 0, ('getCVT', 'getDELTA', 'getDELTAS'), False) elif name == "deltap": return DeltaInstruction(name.upper(), None, 0, ('getVAL', 'getDELTA', 'getDELTAS'), False) elif name == "DEPTH": return Instruction(name, 0x24, 0, (), True) elif name == "DUP": return Instruction(name, 0x20, 0, (), True) elif name == "DIV": return Instruction(name, 0x62, 0, ('getVAL', 'getVAL'), True) elif name == "EIF": return Instruction(name, 0x59, 0, (), False) elif name == "ELSE": return BlockInstruction(name, 0x1B, 0, (), False, ("EIF",)) elif name == "ENDF": return Instruction(name, 0x2D, 0, (), False) elif name == "EQ": return Instruction(name, 0x54, 0, ('getVAL', 'getVAL'), True) elif name == "EVEN": return Instruction(name, 0x57, 0, ('getVAL',), True) elif name == "FDEF": return BlockInstruction(name, 0x2C, 0, ('setFUNC',), False, ("ENDF",)) elif name == "FLIPOFF": return Instruction(name, 0x4E, 0, (), False) elif name == "FLIPON": return Instruction(name, 0x4D, 0, (), False) elif name == "FLIPPT": return LoopInstruction(name, 0x80, 0, ('getVAL', 'getVALS'), False) elif name == "FLIPRGOFF": return Instruction(name, 0x82, 0, ('getVAL', 'getVAL'), False) elif name == "FLIPRGON": return Instruction(name, 0x81, 0, ('getVAL', 'getVAL'), False) elif name == "FLOOR": return Instruction(name, 0x66, 0, ('getVAL',), True) elif name == "GC": return Instruction(name, 0x46, 1, ('getVAL',), True) elif name == "GETINFO": return Instruction(name, 0x88, 0, ('getVAL',), True) elif name == "GFV": return Instruction(name, 0x0D, 0, (), True) elif name == "GPV": return Instruction(name, 0x0C, 0, (), True) elif name == "GT": return Instruction(name, 0x52, 0, ('getVAL', 'getVAL'), True) elif name == "GTEQ": return Instruction(name, 0x53, 0, ('getVAL', 'getVAL'), True) elif name == "IF": return BlockInstruction(name, 0x58, 0, ('getVAL',), False, ("ELSE", "EIF")) elif name == "INSTCTRL": return Instruction(name, 
0x8E, 0, ('getVAL', 'getVAL'), False) elif name == "IP": return LoopInstruction(name, 0x39, 0, ('getVAL', 'getVALS'), False) elif name == "ISECT": return Instruction(name, 0x0F, 0, ('getVAL', 'getVAL', 'getVAL', 'getVAL', 'getVAL'), False) elif name == "IUP": return Instruction(name, 0x30, 1, (), False) elif name == "JMPR": return Instruction(name, 0x1C, 0, ('getVAL',), False) elif name == "JROF": return Instruction(name, 0x79, 0, ('getVAL', 'getVAL'), False) elif name == "JROT": return Instruction(name, 0x78, 0, ('getVAL', 'getVAL'), False) elif name == "LOOPCALL": return CallInstruction(name, 0x2A, 0, ('getVAL', 'CALL'), True, 2) elif name == "LT": return Instruction(name, 0x50, 0, ('getVAL', 'getVAL'), True) elif name == "LTEQ": return Instruction(name, 0x51, 0, ('getVAL', 'getVAL'), True) elif name == "MAX": return Instruction(name, 0x8B, 0, ('getVAL', 'getVAL'), True) elif name == "MD": return Instruction(name, 0x49, 1, ('getVAL', 'getVAL'), True) elif name == "MDAP": return Instruction(name, 0x2E, 1, ('getVAL',), False) elif name == "MDRP": return Instruction(name, 0xC0, 31, ('getVAL',), False) elif name == "MIAP": return Instruction(name, 0x3E, 1, ('getVAL', 'getCVT'), False) elif name == "MIN": return Instruction(name, 0x8C, 0, ('getVAL', 'getVAL'), True) elif name == "MINDEX": return Instruction(name, 0x26, 0, ('getVAL',), True) elif name == "MIRP": return Instruction(name, 0xE0, 31, ('getVAL', 'getCVT'), False) elif name == "MPPEM": return Instruction(name, 0x4B, 0, (), True) elif name == "MPS": return Instruction(name, 0x4C, 0, (), True) elif name == "MSIRP": return Instruction(name, 0x3A, 1, ('getVAL', 'getVAL'), False) elif name == "MUL": return Instruction(name, 0x63, 0, ('getVAL', 'getVAL'), True) elif name == "NEG": return Instruction(name, 0x65, 0, ('getVAL',), True) elif name == "NEQ": return Instruction(name, 0x55, 0, ('getVAL', 'getVAL'), True) elif name == "NOT": return Instruction(name, 0x5C, 0, ('getVAL',), True) elif name == "NROUND": return Instruction(name, 0x6C, 3, ('getVAL',), True) elif name == "ODD": return Instruction(name, 0x56, 0, ('getVAL',), True) elif name == "OR": return Instruction(name, 0x5B, 0, ('getVAL', 'getVAL'), True) elif name == "POP": return Instruction(name, 0x21, 0, (), True) # special case for mightPush elif name == "push": return Instruction(None, None, 0, ('getVALS',), True) elif name == "RCVT": return Instruction(name, 0x45, 0, ('getCVT',), True) elif name == "RDTG": return Instruction(name, 0x7D, 0, (), False) elif name == "ROFF": return Instruction(name, 0x7A, 0, (), False) elif name == "ROLL": return Instruction(name, 0x8A, 0, (), True) elif name == "ROUND": return Instruction(name, 0x68, 3, ('getVAL',), True) elif name == "RS": return Instruction(name, 0x43, 0, ('getSTOR',), True) elif name == "RTDG": return Instruction(name, 0x3D, 0, (), False) elif name == "RTG": return Instruction(name, 0x18, 0, (), False) elif name == "RTHG": return Instruction(name, 0x19, 0, (), False) elif name == "RUTG": return Instruction(name, 0x7C, 0, (), False) elif name == "S45ROUND": return Instruction(name, 0x77, 0, ('getVAL',), False) elif name == "SCANCTRL": return Instruction(name, 0x85, 0, ('getVAL',), False) elif name == "SCANTYPE": return Instruction(name, 0x8D, 0, ('getVAL',), False) elif name == "SCFS": return Instruction(name, 0x48, 0, ('getVAL', 'getVAL'), False) elif name == "SCVTCI": return Instruction(name, 0x1D, 0, ('getVAL',), False) elif name == "SDB": return Instruction(name, 0x5E, 0, ('getVAL',), False) elif name == "SDPVTL": return 
Instruction(name, 0x86, 1, ('getVAL', 'getVAL'), False) elif name == "SDS": return Instruction(name, 0x5F, 0, ('getVAL',), False) elif name == "SFVFS": return Instruction(name, 0x0B, 0, ('getVAL', 'getVAL'), False) elif name == "SFVTCA": return Instruction(name, 0x04, 1, (), False) elif name == "SFVTL": return Instruction(name, 0x08, 1, ('getVAL', 'getVAL'), False) elif name == "SFVTPV": return Instruction(name, 0x0E, 0, (), False) elif name == "SHC": return Instruction(name, 0x34, 1, ('getVAL',), False) elif name == "SHP": return LoopInstruction(name, 0x32, 1, ('getVAL', 'getVALS'), False) elif name == "SHPIX": return Instruction(name, 0x38, 0, ('getVAL', 'getVAL', 'getVALS'), False) elif name == "SHZ": return Instruction(name, 0x36, 1, ('getVAL',), False) elif name == "SLOOP": return Instruction(name, 0x17, 0, ('getVAL',), False) elif name == "SMD": return Instruction(name, 0x1A, 0, ('getVAL',), False) elif name == "SPVFS": return Instruction(name, 0x0A, 0, ('getVAL', 'getVAL'), False) elif name == "SPVTCA": return Instruction(name, 0x02, 1, (), False) elif name == "SPVTL": return Instruction(name, 0x06, 1, ('getVAL', 'getVAL'), False) elif name == "SROUND": return Instruction(name, 0x76, 0, ('getVAL',), False) elif name == "SRP0": return Instruction(name, 0x10, 0, ('getVAL',), False) elif name == "SRP1": return Instruction(name, 0x11, 0, ('getVAL',), False) elif name == "SRP2": return Instruction(name, 0x12, 0, ('getVAL',), False) elif name == "SSW": return Instruction(name, 0x1F, 0, ('getVAL',), False) elif name == "SSWCI": return Instruction(name, 0x1E, 0, ('getVAL',), False) elif name == "SUB": return Instruction(name, 0x61, 0, ('getVAL', 'getVAL'), True) elif name == "SVTCA": return Instruction(name, 0x00, 1, (), False) elif name == "SWAP": return Instruction(name, 0x23, 0, (), True) elif name == "SZP0": return Instruction(name, 0x13, 0, ('getVAL',), False) elif name == "SZP1": return Instruction(name, 0x14, 0, ('getVAL',), False) elif name == "SZP2": return Instruction(name, 0x15, 0, ('getVAL',), False) elif name == "SZPS": return Instruction(name, 0x16, 0, ('getVAL',), False) elif name == "UTP": return Instruction(name, 0x29, 0, ('getVAL',), False) elif name == "void": return BlockInstruction("FDEF", 0x2C, 0, ('setVOID',), False, ("ENDF",)) elif name == "WCVTF": return Instruction(name, 0x70, 0, ('getCVT', 'getVAL'), False) elif name == "WCVTP": return Instruction(name, 0x44, 0, ('getCVT', 'getVAL'), False) elif name == "WS": return Instruction(name, 0x42, 0, ('setSTOR', 'getVAL'), False) else: raise HumbleError("Unsupported instruction: {}".format(name)) @staticmethod def newSubBlockInstruction(block): instruction = SubBlockInstruction() instruction.block = block return instruction @staticmethod def newOperationInstruction(symbol): if symbol == "==": return Instruction.newInstruction("EQ") elif symbol == "!=": return Instruction.newInstruction("NEQ") elif symbol == ">=": return Instruction.newInstruction("GTEQ") elif symbol == ">" : return Instruction.newInstruction("GT") elif symbol == "<=": return Instruction.newInstruction("LTEQ") elif symbol == "<" : return Instruction.newInstruction("LT") elif symbol == "+" : return Instruction.newInstruction("ADD") elif symbol == "-" : return Instruction.newInstruction("SUB") elif symbol == "*" : return Instruction.newInstruction("MUL") elif symbol == "/" : return Instruction.newInstruction("DIV") elif symbol == "or": return Instruction.newInstruction("OR") elif symbol == "and": return Instruction.newInstruction("AND") else: raise 
HumbleError("Invalid operator symbol: {}".format(symbol)) @staticmethod def _newDeltaInstruction(name): if name == "DELTAC1": return Instruction(name, 0x73, 0, None, False) elif name == "DELTAC2": return Instruction(name, 0x74, 0, None, False) elif name == "DELTAC3": return Instruction(name, 0x75, 0, None, False) elif name == "DELTAP1": return Instruction(name, 0x5D, 0, None, False) elif name == "DELTAP2": return Instruction(name, 0x71, 0, None, False) elif name == "DELTAP3": return Instruction(name, 0x72, 0, None, False) else: raise NameError(name) def __init__(self, name, opCode, maxFlag, recipe, mightPush): self.name = name self.opCode = opCode self.flag = 0 self.maxFlag = maxFlag self.recipe = recipe self.mightPush = mightPush self.pre = None self.arguments = [] self.post = None def setFlag(self, flag): if flag <= self.maxFlag: self.flag = flag else: raise HumbleError("Invalid flag value for {}: {}".format(self.name, flag)) def add(self, argument): self.arguments.append(argument) def canMerge(self, other): return False def merge(self, other): raise TypeError def canChain(self): return True def canPush(self): return self.mightPush or self.pre or (self.post and self.post.canPush()) def chain(self, other): if self.pre: self.pre.chain(other) else: for argument in self.arguments: if argument.canChain(): argument.chain(other) return if other.canPush(): self.pre = other elif self.post: self.post.chain(other) else: self.post = other def write(self, accumulator): if self.pre: self.pre.write(accumulator) for argument in self.arguments: argument.write(accumulator) if self.post: self.post.write(accumulator) if self.name: self._writeName(accumulator) def _writeName(self, accumulator): accumulator.writeInstruction(self.name, self.opCode, self.flag, self.maxFlag) class BlockInstruction(Instruction): def __init__(self, name, opCode, maxFlag, recipe, mightPush, stops): Instruction.__init__(self, name, opCode, maxFlag, recipe, mightPush) self.stops = stops self.block = Block() def canMerge(self, other): if isinstance(self.block.last, BlockInstruction): return True elif self.stops: return other.name not in self.stops else: return False def merge(self, other): self.block.add(other) def write(self, accumulator): Instruction.write(self, accumulator) self.block.write(accumulator.writer) class CallInstruction(Instruction): def __init__(self, name, opCode, maxFlag, recipe, mightPush, insertIndex): Instruction.__init__(self, name, opCode, maxFlag, recipe, mightPush) self.insertIndex = insertIndex def add(self, argument): if len(self.arguments) >= self.insertIndex: self.arguments.insert(-self.insertIndex, argument) else: self.arguments.append(argument) class DeltaInstruction(Instruction): def __init__(self, name, opCode, maxFlag, recipe, mightPush): Instruction.__init__(self, name, opCode, maxFlag, recipe, mightPush) self.target = None def add(self, argument): if self.target is not None: argument.target = self.target self.arguments.append(argument) else: self.target = argument.value def canMerge(self, other): return other.name == self.name def merge(self, other): self.arguments += other.arguments def write(self, accumulator): self.arguments.sort() deltaBase = self.arguments[0].ppem variant = 1 variantBase = deltaBase count = 0 block = Block() block.add(self.__makeSDB(deltaBase)) delta = self.__makeDELTA(variant) for argument in self.arguments: if argument.ppem > variantBase + 15: delta.add(IntegerArgument(count)) block.add(delta) count = 0 if argument.ppem > deltaBase + 47: deltaBase = argument.ppem variant = 1 
variantBase = deltaBase block.add(self.__makeSDB(deltaBase)) else: variant = ((argument.ppem - deltaBase) >> 4) + 1 variantBase = deltaBase + ((variant-1) << 4) delta = self.__makeDELTA(variant) argument.base = variantBase delta.add(argument) count += 1 delta.add(IntegerArgument(count)) block.add(delta) if self.pre: self.pre.write(accumulator) elif self.post: block.last.chain(self.post) block.last.write(accumulator) def __makeDELTA(self, variant): return Instruction._newDeltaInstruction(self.name + str(variant)) def __makeSDB(self, base): sdb = Instruction.newInstruction("SDB") sdb.add(IntegerArgument(base)) return sdb class LoopInstruction(Instruction): LIMIT = 4 def __init__(self, name, opCode, maxFlag, recipe, mightPush): Instruction.__init__(self, name, opCode, maxFlag, recipe, mightPush) def canMerge(self, other): return other.name == self.name and other.flag == self.flag and \ self.arguments and other.arguments def merge(self, other): self.arguments += other.arguments def write(self, accumulator): block = Block() if len(self.arguments) >= self.LIMIT: sloop = Instruction.newInstruction("SLOOP") sloop.add(IntegerArgument(len(self.arguments))) block.add(sloop) push = Instruction.newInstruction("push") push.arguments = self.arguments block.add(push) if self.pre: self.pre.write(accumulator) elif self.post: block.last.chain(self.post) block.last.write(accumulator) args = len(self.arguments) for _ in range(args if args and args < self.LIMIT else 1): Instruction._writeName(self, accumulator) class SubBlockInstruction(Instruction): def __init__(self): Instruction.__init__(self, None, None, None, (), False) self.block = Block() def write(self, accumulator): Instruction.write(self, accumulator) self.block.write(accumulator.writer) ``` #### File: HumbleTypeInstructionCompiler/test/helper.py ```python import io import htic CVT = "cvt{ \n 0 cvt0 \n 10 cvt1 \n 20 cvt2 \n}" FLAGS = "flags{ x 1 \n m 11110 \n}" FPGM = "fpgm{ FDEF 0 func0 val \n POP \n ENDF \n FDEF 1 func1 val \n POP \n ENDF \n}" FPGMPARAMS = "fpgm{ FDEF 0 func0 val pt cvt func stor \n POP \n POP \n POP \n POP \n POP \n ENDF \n}" def getData(code): parser = htic.parser.Parser() return parser.parse(io.BytesIO(code)) def toBytes(instructions, precode="", name="A"): data = getData(precode + name + "{" + instructions + "\n}") translator = htic.translator.BinaryTranslator() if name == "prep": return translator.translate(data.prep) elif name == "fpgm": return translator.translate(data.fpgm) else: return translator.translate(data.glyphs[name]) ```
{ "source": "jenskutilek/jkFontGeometry", "score": 3 }
#### File: Lib/jkFontGeometry/cubics.py ```python from fontTools.misc.bezierTools import calcCubicParameters, solveCubic from math import hypot from jkFontGeometry.beziertools import ( estimateCubicCurveLength, getInflectionsForCubic, getExtremaForCubic, ) from jkFontGeometry.beziertools import getPointOnCubic as get_cubic_point from jkFontGeometry import Point from typing import Dict, List, Optional, Tuple DEBUG_SPLIT = False class Cubic: def __init__( self, p0: Point, p1: Point, p2: Point, p3: Point, raster_length: float = 0.25, ) -> None: self.p0 = p0 self.p1 = p1 self.p2 = p2 self.p3 = p3 # The estimated length of each distance if the cubic is converted # to points self.raster_length = raster_length # The list of points on the cubic, with estimated raster_length # distance self._cubic_points: Optional[List[Point]] = None self._num_cubic_points: Optional[int] = None # The calculated estimated curve length self._estimated_length: Optional[float] = None # The number of steps to achieve the desired point distances self._raster_steps: Optional[int] = None # The current split point (will be moved along the curve when # splitting) self._t = 0.0 # Cache for Cubic params (a, b, c, d) self._params: Optional[Tuple[Point, Point, Point, Point]] = None # Cache for inflection points self._inflections: Optional[List[float]] = None self._inflection_points: Optional[List[Point]] = None # Cache for extremum points self._extrema: Optional[List[float]] = None self._extremum_points: Optional[List[Point]] = None def __repr__(self) -> str: return "<Cubic p0=%s, p3=%s>" % (self.p0, self.p3) @property def extrema(self) -> List[float]: if self._extrema is None: self._extrema = self.calculate_extrema() return self._extrema @property def extremum_points(self) -> List[Point]: if self._extremum_points is None: self._extremum_points = self.calculate_extremum_points() return self._extremum_points @property def inflections(self) -> List[float]: if self._inflections is None: self._inflections = self.calculate_inflections() return self._inflections @property def inflection_points(self) -> List[Point]: if self._inflection_points is None: self._inflection_points = self.calculate_inflection_points() return self._inflection_points @property def length(self) -> float: if self._estimated_length is None: self._estimated_length = estimateCubicCurveLength( self.p0, self.p1, self.p2, self.p3 ) return self._estimated_length @property def params(self) -> Tuple[Point, Point, Point, Point]: if self._params is None: self._params = calcCubicParameters( self.p0, self.p1, self.p2, self.p3 ) return self._params @property def raster_steps(self) -> int: if self._raster_steps is None: self._raster_steps = int(round(self.length / self.raster_length)) return self._raster_steps @property def cubic_points(self) -> List[Point]: # Calculate or return the cached list of t to point mappings. 
if self._cubic_points is None: self._cubic_points = self.calculate_cubic_points() self._num_cubic_points = len(self._cubic_points) - 1 return self._cubic_points @property def num_cubic_points(self) -> int: if self._num_cubic_points is None: self._cubic_points = self.calculate_cubic_points() self._num_cubic_points = len(self._cubic_points) - 1 return self._num_cubic_points def calculate_cubic_points(self) -> List[Point]: # Return a list of point coordinates for the cubic curve according to # the current raster_steps value # st = time() t_list = [] if ( self.raster_steps < 2 or (self.p0 == self.p1) and (self.p2 == self.p3) ): t_list = [self.p0, self.p3] else: step = 1 / self.raster_steps t_list = [ get_cubic_point(t * step, self.p0, self.p1, self.p2, self.p3) for t in range(0, self.raster_steps + 1) ] # et = time() # print("calculate_cubic_points: %0.3f ms" % ((et-st)*1000)) return t_list def calculate_extrema(self) -> List[float]: return getExtremaForCubic( self.p0, self.p1, self.p2, self.p3, h=True, v=False, include_start_end=True, ) def calculate_extremum_points(self) -> List[Point]: return [ get_cubic_point(t, self.p0, self.p1, self.p2, self.p3) for t in self.extrema ] def calculate_inflections(self) -> List[float]: # TODO: Inflections "between" segments return getInflectionsForCubic(self.p0, self.p1, self.p2, self.p3) def calculate_inflection_points(self) -> List[Point]: return [ get_cubic_point(t, self.p0, self.p1, self.p2, self.p3) for t in self.inflections ] def reset_split(self) -> None: self._t = 0.0 def split_at_t(self, t: float) -> Tuple[Point, Point, Point, Point]: # From https://stackoverflow.com/questions/878862/drawing-part-of-a-bé # zier-curve-by-reusing-a-basic-bézier-curve-function # st = time() t0 = self._t t1 = t u0 = 1.0 - t0 u1 = 1.0 - t1 x1, y1 = self.p0 bx1, by1 = self.p1 bx2, by2 = self.p2 x2, y2 = self.p3 qxa = x1 * u0 * u0 + bx1 * 2 * t0 * u0 + bx2 * t0 * t0 qxb = x1 * u1 * u1 + bx1 * 2 * t1 * u1 + bx2 * t1 * t1 qxc = bx1 * u0 * u0 + bx2 * 2 * t0 * u0 + x2 * t0 * t0 qxd = bx1 * u1 * u1 + bx2 * 2 * t1 * u1 + x2 * t1 * t1 qya = y1 * u0 * u0 + by1 * 2 * t0 * u0 + by2 * t0 * t0 qyb = y1 * u1 * u1 + by1 * 2 * t1 * u1 + by2 * t1 * t1 qyc = by1 * u0 * u0 + by2 * 2 * t0 * u0 + y2 * t0 * t0 qyd = by1 * u1 * u1 + by2 * 2 * t1 * u1 + y2 * t1 * t1 xa = qxa * u0 + qxc * t0 xb = qxa * u1 + qxc * t1 xc = qxb * u0 + qxd * t0 xd = qxb * u1 + qxd * t1 ya = qya * u0 + qyc * t0 yb = qya * u1 + qyc * t1 yc = qyb * u0 + qyd * t0 yd = qyb * u1 + qyd * t1 self._t = t # et = time() # print("split_at_t: %0.3f ms" % ((et-st)*1000)) return ((xa, ya), (xb, yb), (xc, yc), (xd, yd)) class SuperCubic: # Collection of multiple Cubic segments def __init__(self) -> None: self.cubics: List[Cubic] = [] self._split_index = 0 # The cached map of t to point self._t_points: Dict[Point, Tuple[int, float]] = {} # Keep track of current t for faster searching self._t_step = 0 self._inflection_points: Optional[List[Point]] = None self._extremum_points: Optional[List[Point]] = None def __repr__(self) -> str: return "<SuperCubic len=%i>" % len(self.cubics) @property def inflection_points(self) -> List[Point]: # All inflection points from the sub-cubics if self._inflection_points is None: self._inflection_points = [] for cubic in self.cubics: if cubic.inflection_points: self._inflection_points.extend(cubic.inflection_points) return self._inflection_points @property def extremum_points(self) -> List[Point]: # All extremum points from the sub-cubics if self._extremum_points is None: self._extremum_points = [] for 
cubic in self.cubics: if cubic.extremum_points: self._extremum_points.extend(cubic.extremum_points) return self._extremum_points def add_cubic_from_points( self, p0: Point, p1: Point, p2: Point, p3: Point, raster_length: float = 0.25, ) -> None: cubic = Cubic(p0, p1, p2, p3, raster_length) self.cubics.append(cubic) def add_cubic_from_point_tuple( self, point_tuple: List[Point], raster_length: float = 0.25 ) -> None: num_points = len(point_tuple) if num_points == 4: p0, p1, p2, p3 = point_tuple elif num_points == 2: print("WARNING: Not a curve:", point_tuple) # Add a flat curve p0, p3 = point_tuple p1 = ( p0[0] + 0.333333 * (p3[0] - p0[0]), p0[1] + 0.333333 * (p3[1] - p0[1]), ) p2 = ( p0[0] + 0.666667 * (p3[0] - p0[0]), p0[1] + 0.666667 * (p3[1] - p0[1]), ) else: raise ValueError self.add_cubic_from_points(p0, p1, p2, p3, raster_length) def t_for_point(self, pt: Point) -> Optional[Tuple[int, float]]: # TODO: Cache previous pt so the search can start there? return self._t_points.get(pt, self.calculate_t_for_point(pt)) def calculate_t_for_point(self, pt) -> Optional[Tuple[int, float]]: # Calculate the t value for the closest distance of point pt to a # series of cubic Beziers x, y = pt # Check special case: Is the point close to the first or last points of # any of the cubics? for index in range(self._split_index, len(self.cubics)): cubic = self.cubics[index] p0x = round(cubic.p0[0]) p0y = round(cubic.p0[1]) p3x = round(cubic.p3[0]) p3y = round(cubic.p3[1]) if p0x - 1 <= x <= p0x + 1 and p0y - 1 <= y <= p0y + 1: self._split_index = index self._t_step = 0 tx, ty = get_cubic_point( 0, cubic.p0, cubic.p1, cubic.p2, cubic.p3 ) # print( # " " # "Fast Found t = 0 -> (%0.3f, %0.3f)" % (tx, ty) # ) return (index, 0.0) elif p3x - 1 <= x <= p3x + 1 and p3y - 1 <= y <= p3y + 1: self._split_index = index self._t_step = cubic.num_cubic_points tx, ty = get_cubic_point( 1, cubic.p0, cubic.p1, cubic.p2, cubic.p3 ) # print( # " " # "Fast Found t = 1 -> (%0.3f, %0.3f)" % (tx, ty) # ) return (index, 1.0) # Take the long road prev_dist: Optional[float] = None for index in range(self._split_index, len(self.cubics)): cubic = self.cubics[index] self._split_index = index for step in range(self._t_step, cubic.num_cubic_points + 1): p = cubic.cubic_points[step] px, py = p dist = hypot(y - py, x - px) # Point distance if prev_dist is not None and dist > prev_dist: if prev_dist is not None: index_step = ( index, step / cubic.num_cubic_points, ) self._t_points[pt] = index_step # print( # " " # f"Searching for t in cubic {self._split_index} " # f"from step {self._t_step} to {step} of " # f"{cubic.num_cubic_points} ..." # ) # tx, ty = get_cubic_point( # step / cubic.num_cubic_points, # cubic.p0, # cubic.p1, # cubic.p2, # cubic.p3, # ) # print( # " " # "Found t = %0.3f -> (%0.3f, %0.3f)" % ( # step / cubic.num_cubic_points, tx, ty # ) # ) self._t_step = step return index_step prev_dist = dist self.reset_t() prev_dist = None return None def reset_split(self) -> None: for c in self.cubics: c.reset_split() self._split_index = 0 self.reset_t() def reset_t(self) -> None: self._t_step = 0 def split_at_pt(self, pt: Point) -> Tuple[Point, Point, Point, Point]: if DEBUG_SPLIT: print("SuperCubic.split_at_pt", pt, "->") index_t = self.t_for_point(pt) if index_t is None: raise ValueError index, t = index_t # FIXME: This only splits inside one cubic segment? if DEBUG_SPLIT: print( " Splitting cubic %i from %0.4f to %0.4f ..." 
% (index, self.cubics[index]._t, t) ) # self._split_index = index return self.cubics[index].split_at_t(t) def split_at_pt_fast(self, pt: Point) -> Tuple[Point, Point, Point, Point]: if DEBUG_SPLIT: print("SuperCubic.split_at_pt_fast", pt, "->") index = 0 x, y = pt a, b, c, d = self.cubics[0].params solutions_h = solveCubic(a[1], b[1], c[1], d[1] - y) solutions_v = solveCubic(a[0], b[0], c[0], d[0] - x) solutions_h = [t for t in solutions_h if 0 <= t < 1] solutions_v = [t for t in solutions_v if 0 <= t < 1] if DEBUG_SPLIT: print(solutions_h, solutions_v) if len(solutions_h) == 1 and solutions_v: # Take the average of both values t = (solutions_v[0] + solutions_h[0]) * 0.5 else: print( " Different number of solutions for h and v:", solutions_h, solutions_v, ) index_t = self.t_for_point(pt) if index_t is None: raise ValueError index, t = index_t print(" Choosing via thorough method:", t) self._split_index = index return self.cubics[index].split_at_t(t) def split_remainder(self) -> Tuple[Point, Point, Point, Point]: return self.cubics[self._split_index].split_at_t(1.0) ``` #### File: Lib/jkFontGeometry/geometryPoints.py ```python from jkFontGeometry import geometry # Helper functions for geometry, point-based API with pt.x and pt.y # These are just wrapper functions for the tuple-based functions in # jkRFTools.geometry. def angle_between_points(p0, p1, do_round=False): return geometry.angle_between_points((p0.x, p0.y), (p1.x, p1.y), do_round) def distance_between_points(p0, p1, do_round=False): return geometry.distance_between_points( (p0.x, p0.y), (p1.x, p1.y), do_round ) def half_point(p0, p1, do_round=False): # Get the coordinate tuple that lies halfway between two other points. hp = p0.copy() hp.x, hp.y = geometry.half_point((p0.x, p0.y), (p1.x, p1.y), do_round) return hp def round_point(pt): return (int(round(pt.x)), int(round(pt.y))) def round_point_conditional(pt, do_round=True): if do_round: return (int(round(pt.x)), int(round(pt.y))) else: return pt # Triangle Geometry # p0 is the first point of the Bezier segment and p3 the last point. # p1 is the handle of p0 and p2 the handle of p3. # A triangle is formed: # b = hypotenuse, the line from p0 to p3 # a = p0 to I with I being the intersection point of the lines p0 to p1 # and p3 to p2 # c = p3 to I " # alpha = the angle between p0p1 and p0p3 # beta = the angle between p3p0 and p3p2 # gamma = the angle between p3I and p0I def triangle_angles(p0, p1, p2, p3): # Calculate the angles return geometry.triangle_angles( (p0.x, p0.y), (p1.x, p1.y), (p2.x, p2.y), (p3.x, p3.y), ) def triangle_area(a, b, c, do_round=False): return geometry.triangle_area( (a.x, a.y), (b.x, b.y), (c.x, c.y), do_round, ) def triangle_sides(p0, p1, p2, p3): # Calculate the sides of the triangle return geometry.triangle_sides( (p0.x, p0.y), (p1.x, p1.y), (p2.x, p2.y), (p3.x, p3.y), ) def is_on_left(a, b, c): # Is point c on the left of ab? return geometry.is_on_left((a.x, a.y), (b.x, b.y), (c.x, c.y),) def is_on_right(a, b, c): # Is point c on the right of ab? return geometry.is_on_right((a.x, a.y), (b.x, b.y), (c.x, c.y),) def is_collinear(a, b, c): # Is point c on ab? return geometry.is_collinear((a.x, a.y), (b.x, b.y), (c.x, c.y),) ```
{ "source": "jenskutilek/jkRFExtensionSettings", "score": 3 }
#### File: lib/jkRFExtensionSettings/Setting.py
```python
class Setting(object):
    def __init__(self, key, default_value, name=None):
        self._key = key
        self._default_value = default_value
        self._name = name
        self._ui_object = None

    def _get_key(self):
        return self._key

    def _set_key(self, value):
        self._key = value

    key = property(_get_key, _set_key)

    def _get_value(self):
        if self._ui_object is None:
            return self._value
        else:
            return self._ui_object.get()

    def _set_value(self, value):
        self._value = value

    value = property(_get_value, _set_value)

    def _get_default_value(self):
        return self._default_value

    def _set_default_value(self, value):
        self._default_value = value

    default_value = property(_get_default_value, _set_default_value)

    def _get_name(self):
        if self._name is None:
            return self._key
        return self._name

    def _set_name(self, value):
        self._name = value

    name = property(_get_name, _set_name)

    def _get_ui_object(self):
        return self._ui_object

    def _set_ui_object(self, value):
        self._ui_object = value

    ui_object = property(_get_ui_object, _set_ui_object)

    def is_default(self):
        if self._value == self._default_value:
            return True
        return False
```
{ "source": "jenskutilek/jkRFoTools", "score": 3 }
#### File: Lib/jkRFoTools/unitization.py ```python class UnitizationInfo(object): def __init__(self): self.systems = {} def add_system(self, system): if system.upm in self.systems: self.systems[system.upm].append(system) else: self.systems[system.upm] = [system] def get_systems_by_upm(self, upm): if upm in self.systems: return self.systems[upm] else: return [] def get_all_units(self): return self.systems.keys() class UnitSystem(object): def __init__(self, name, units, min_units=None, max_units=None, strategy="free", unit_dict={}): self.name = name self.upm = units self.min_units = min_units self.max_units = max_units # unitization strategies if strategy == "alleq": self.set_all_equal() elif strategy == "fixed": if unit_dict: self.set_fixed_units(unit_dict) else: raise "Must supply unit_dict when using fixed units" elif strategy == "free": self.set_free_units() else: raise "Unknown unitization strategy." def set_fixed_units(self, unit_dict): self.all_equal = False self.fixed_units = unit_dict self.free_units = False def set_all_equal(self, all_equal=True): self.all_equal = all_equal self.fixed_units = {} self.free_units = False def set_free_units(self, free_units=True): self.all_equal = False self.fixed_units = {} self.free_units = free_units # define known unit systems # source: <http://www.quadibloc.com/comp/propint.htm> unitization_info = UnitizationInfo() unitization_info.add_system(UnitSystem("Monospaced", 1, 0, 1, "alleq")) unitization_info.add_system(UnitSystem("IBM Executive", 5, 2, 5, "fixed", { 2: ["f", "i", "j", "l", "t", "I", ".", ",", ":", ";", "'", "!", "(", ")", " "], 3: ["a", "b", "c", "d", "e", "g", "h", "k", "n", "o", "p", "q", "r", "s", "u", "v", "x", "y", "z", "J", "S", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", '"', "?", "#", "+", "-", "*", "/", "="], 4: ["w", "A", "B", "C", "D", "E", "F", "G", "H", "K", "L", "N", "O", "P", "Q", "R", "T", "U", "V", "X", "Y", "Z", "&"], 5: ["m", "W", "M", "@", "%", "_", u"½", u"¼"], })) unitization_info.add_system(UnitSystem("Mag Card Executive", 7, 3, 7, "fixed", { 3: ["i", "j", "l"], 4: ["f", "t", "I", "'"], 5: ["a", "c", "e", "h", "k", "n", "o", "r", "s", "u", "v", "x", "z", "J", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".", ",", ":", ";", "!", "?", '"', "@", "#", "$", "&", "¢", "(", ")", "+", "-", "*", "/", "=", "_", u"½", u"¼", " "], 6: ["b", "d", "g", "p", "q", "y", "E", "F", "L", "P", "S", "Z"], 7: ["m", "w", "A", "B", "C", "D", "G", "H", "K", "M", "N", "O", "Q", "R", "T", "U", "V", "W", "X", "Y"], })) unitization_info.add_system(UnitSystem("IBM Selectric Composer", 9, 3, 9, "fixed", { 3: ["i", "j", "l", ".", ",", ";", "`", "'", "-", " "], 4: ["f", "t", "r", "s", "I", ":", "!", "(", ")", "/"], 5: ["a", "c", "e", "g", "z", "J", "[", "?"], 6: ["b", "d", "h", "k", "n", "o", "p", "q", "u", "v", "x", "y", "P", "S", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "]", "+", "*", "=", "$", u"†"], 7: ["B", "C", "E", "F", "L", "T", "Z"], 8: ["w", "A", "D", "G", "H", "K", "N", "O", "Q", "R", "U", "V", "X", "Y", u"&", u"@", u"%", u"½", u"¼", u"¾", u"—"], 9: ["m", "W", "M"], })) unitization_info.add_system(UnitSystem("Monotype hot metal and early photo typesetting", 18, 0, 18, "free")) unitization_info.add_system(UnitSystem("Linotype early photo typesetting", 18, 0, 18, "free")) unitization_info.add_system(UnitSystem("Berthold photo typesetting", 48, 0, 48, "free")) unitization_info.add_system(UnitSystem("Linotype later photo typesetting (3 x 18)", 54, 0, 54, "free")) ```
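A short sketch of querying the registry built above (the import path assumes the Lib/jkRFoTools layout of this repository):

```python
from jkRFoTools.unitization import unitization_info

# All unit-per-em values for which a historical system is registered.
print(sorted(unitization_info.get_all_units()))

# Systems that divide the em into 9 units, e.g. the IBM Selectric Composer.
for system in unitization_info.get_systems_by_upm(9):
    print(system.name, system.min_units, system.max_units)
    if system.fixed_units:
        # Fixed-width systems map each unit count to the glyphs set on that width.
        print(sorted(system.fixed_units.keys()))
```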
{ "source": "jenskutilek/nibLib", "score": 3 }
#### File: nibLib/DrawBot/Ellipse.py ```python Variable([ dict(name="a", ui="Slider", args=dict( value=300, minValue=0, maxValue=500)), dict(name="b", ui="Slider", args=dict( value=200, minValue=0, maxValue=500)), dict(name="alpha", ui="Slider", args=dict( value=45, minValue=0, maxValue=180)), dict(name="phi", ui="Slider", args=dict( value=45, minValue=0, maxValue=180)), dict(name="nib_angle", ui="Slider", args=dict( value=30, minValue=-90, maxValue=90)), ], globals()) alpha = radians(alpha) phi = radians(phi) nib_angle = radians(nib_angle) def _get_point_on_ellipse_at_tangent_angle(a, b, alpha): print "Tangent angle: %0.2f°" % degrees(alpha) t = atan2(- b , (a * tan(alpha))) t = atan(-b /(a * tan(alpha))) print "Parameter t: %0.2f" % (t + nib_angle) return _get_point_on_ellipse_at_t(a, b, t) def _get_point_on_ellipse_at_t(a, b, t): # t is not the angle, it is just a parameter x = a * cos(t) y = b * sin(t) return x, y def _get_point_on_ellipse_at_angle_from_center(a, b, phi): # This time, phi is the real center angle div = sqrt(a**2 * tan(phi) ** 2 / b ** 2 + 1) x = a / div y = a * tan(phi) / div return x, y def draw_arrowhead(size=10, x=0, y=0): line((x-size, y+size), (x, y)) line((x, y), (x-size, y-size)) def draw_cross(size=10, x=0, y=0, phi=45): save() rotate(phi) line((x-size, y+size), (x+size, y-size)) line((x+size, y+size), (x-size, y-size)) restore() newDrawing() size(1000, 1000) fill(None) stroke(0) strokeWidth(0.25) translate(500, 500) save() rotate(degrees(nib_angle)) line((-500, 0), (500, 0)) line((0, 500), (0, -500)) oval(-a, -b, 2*a, 2*b) restore() x, y = _get_point_on_ellipse_at_angle_from_center(a, b, phi) print "Center angle: %0.2f°" % degrees(phi) save() stroke(1, 0, 0) rotate(degrees(nib_angle)) line((0, 0), (x, y)) rect(x-1, y-1, 2, 2) restore() stroke(0, 0, 1) x, y = _get_point_on_ellipse_at_tangent_angle(a, b, alpha - nib_angle) print "Point on ellipse: %0.2f | %0.2f (ellipse reference system)" % (x, y) save() stroke(0,1,0) rotate(degrees(nib_angle)) line((0, 0), (x, y)) rect(x-1, y-1, 2, 2) restore() print "Nib angle: %0.2f°" % degrees(nib_angle) # Calculate the un-transformed point coordinates xnew = x * cos(nib_angle) - y * sin(nib_angle) ynew = x * sin(nib_angle) + y * cos(nib_angle) print "Point on ellipse: %0.2f | %0.2f" % (xnew, ynew) line((0, 0), (xnew, ynew)) rect(xnew-1, ynew-1, 2, 2) translate(xnew, ynew) save() rotate(degrees(alpha)) line((-200, 0), (200, 0)) draw_cross(5, phi = degrees(-alpha)) draw_arrowhead(5, -200) draw_arrowhead(5, 200) # draw the center angle arc rotate(degrees(-alpha)) stroke(1, 0, 1, 0.2) strokeWidth(1) line((0, 0), (100, 0)) newPath() arc((0, 0), 80, 0, degrees(alpha), False) drawPath() strokeWidth(0) fill(1, 0, 1, 0.5) fontSize(12) font("Times-Italic") text("𝛼 = %0.1f°" % degrees(alpha), (20, 10)) restore() ``` #### File: nibLib/DrawBot/EllipseSuper.py ```python import cmath # a = 300 # b = 200 # phi = radians(45) # alpha = 30 # nib_angle = 5 Variable([ dict(name="a", ui="Slider", args=dict( value=300, minValue=0, maxValue=500)), dict(name="b", ui="Slider", args=dict( value=200, minValue=0, maxValue=500)), dict(name="n", ui="Slider", args=dict( value=2.5, minValue=2, maxValue=10)), dict(name="phi", ui="Slider", args=dict( value=45, minValue=0, maxValue=360)), dict(name="alpha", ui="Slider", args=dict( value=45, minValue=0, maxValue=360)), dict(name="nib_angle", ui="Slider", args=dict( value=0, minValue=-90, maxValue=90)), ], globals()) alpha = radians(alpha) nib_angle = radians(nib_angle) phi = radians(phi) # Path optimization 
tools def distanceBetweenPoints(p0, p1, doRound=False): # Calculate the distance between two points d = sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2) if doRound: return int(round(d)) else: return d class Triangle(object): def __init__(self, A, B, C): self.A = A self.B = B self.C = C def sides(self): self.a = distanceBetweenPoints(self.B, self.C) self.b = distanceBetweenPoints(self.A, self.C) self.c = distanceBetweenPoints(self.A, self.B) return self.a, self.b, self.c def height_a(self): a, b, c = self.sides() s = (a + b + c) / 2 h = 2 * sqrt(s * (s-a) * (s-b) * (s-c)) / a return h def optimizePointPath(p, dist=0.49): print("Input number of points:", len(p)) num_points = len(p) p0 = p[0] optimized = [p0] i = 0 j = 1 while i < num_points -2: p1 = p[i+1] p2 = p[i+2] t = Triangle(p0, p2, p1) h = t.height_a() # print(i, h) if t.height_a() > dist: optimized.extend([p1]) p0 = p[i] else: pass # print("Skip:", i+1, p1) i += 1 j += 1 # if j > 13: # break optimized.extend([p[-1]]) print("Optimized number of points:", len(optimized)) return optimized def get_superellipse_points(a, b, n, alpha=0, steps=100): points = [] for i in range(0, steps + 1): t = i * 0.5 * pi / steps points.append(( a * cos(t) ** (2 / n) * cos(alpha) - b * sin(t) ** (2 / n) * sin(alpha), a * cos(t) ** (2 / n) * sin(alpha) + b * sin(t) ** (2 / n) * cos(alpha), )) try: points = optimizePointPath(points, 1) except: print("oops") pass points.extend([(-p[0], p[1]) for p in reversed(points)]) points.extend([(-p[0], -p[1]) for p in reversed(points)]) return points def superellipse(a, b, n, alpha, steps=100): points = get_superellipse_points(a, b, n, 0, steps) save() rotate(degrees(alpha)) newPath() moveTo(points[0]) for p in points[1:]: lineTo(p) closePath() drawPath() restore() # def _get_point_on_ellipse_at_tangent_angle(a, b, alpha): # # phi = atan2( # - b, # a * tan(alpha) # ) # # x = a * cos(phi) # y = b * sin(phi) # # return x, y def _get_point_on_superellipse_at_tangent_angle(a, b, n, alpha): print("a =", a) print("b =", b) exp = 1 / (2 - 2 / n) + 0j print("Exponent:", exp) factor1 = (-b / a) ** -exp factor2 = cmath.tan(alpha) ** exp print("factor1 =", factor1) print("factor2 =", factor2) phi = atan2( 1, (factor1 * factor2).real, ) print("phi =", degrees(phi)) return _get_point_on_superellipse_at_angle_from_center(a, b, n, phi) def _get_point_on_superellipse_at_angle_from_center(a, b, n, phi): x = a * cos(phi) ** (2 / n + 0j) y = b * sin(phi) ** (2 / n + 0j) print(x, y) return x.real, y.real def draw_arrowhead(size=10, x=0, y=0): line((x-size, y+size), (x, y)) line((x, y), (x-size, y-size)) def draw_cross(size=10, x=0, y=0, phi=45): save() rotate(phi) line((x-size, y+size), (x+size, y-size)) line((x+size, y+size), (x-size, y-size)) restore() print("Superellipse with n = %0.2f" % n) newDrawing() size(1000, 1000) fill(None) stroke(0) strokeWidth(0.25) translate(500, 500) save() rotate(degrees(nib_angle)) stroke(1, 0, 0) line((-500, 0), (500, 0)) line((0, 500), (0, -500)) stroke(1, 1, 0) oval(-a, -b, 2*a, 2*b) rotate(degrees(-nib_angle)) stroke(0) strokeWidth(1) superellipse(a, b, n, nib_angle) restore() save() rotate(degrees(nib_angle)) stroke(0, 1, 0) x, y = _get_point_on_superellipse_at_angle_from_center(a, b, n, phi) line((0, 0), (x, y)) rect(x-1, y-1, 2, 2) restore() stroke(0, 0, 1) x, y = _get_point_on_superellipse_at_tangent_angle(a, b, n, alpha - nib_angle) print(x, y) save() stroke(0,1,0) rotate(degrees(nib_angle)) line((0, 0), (x, y)) rect(x-1, y-1, 2, 2) restore() # print("Nib angle:", degrees(nib_angle)) # Calculate 
the un-transformed point coordinates xnew = x * cos(nib_angle) - y * sin(nib_angle) ynew = x * sin(nib_angle) + y * cos(nib_angle) # print(xnew, ynew) line((0, 0), (xnew, ynew)) rect(xnew-1, ynew-1, 2, 2) translate(xnew, ynew) save() rotate(degrees(alpha)) line((-300, 0), (300, 0)) draw_cross(5, phi = degrees(-alpha)) draw_arrowhead(5, -300) draw_arrowhead(5, 300) # draw the center angle arc rotate(degrees(-alpha)) stroke(1, 0, 1, 0.2) strokeWidth(1) line((0, 0), (100, 0)) newPath() arc((0, 0), 80, 0, degrees(alpha), False) drawPath() strokeWidth(0) fill(1, 0, 1, 0.5) fontSize(12) font("Times-Italic") text("𝛼 = %0.1f°" % degrees(alpha), (20, 10)) restore() ```
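The point-reduction helper in the script above does not depend on DrawBot; with the Triangle and optimizePointPath definitions from it (plus an explicit math import when run outside DrawBot, since the script calls sqrt, cos, sin and friends without importing them), it can be exercised on any dense point list:

```python
from math import sqrt, pi, cos, sin  # needed by the helpers above when run outside DrawBot

# Sample a quarter circle far more densely than needed, then let
# optimizePointPath thin it out using a tolerance of about one unit.
steps = 200
dense = [
    (100 * cos(i * pi / 2 / steps), 100 * sin(i * pi / 2 / steps))
    for i in range(steps + 1)
]
sparse = optimizePointPath(dense, dist=1)
print(len(dense), "->", len(sparse))
```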
{ "source": "jenskutilek/PythonMacTabletLib", "score": 2 }
#### File: PythonMacTabletLib/Python/wxTest.py ```python import wx import ctypes # # === INSTRUCTIONS === # # 1. COMPILE THE XCODE PROJECT FIRST. # Make sure to compile it in 32-bit, since wxPython relies on 32-bit mode # # 2. COPY THE .so FILE THAT WAS CREATED TO THE FOLDER # WHERE THIS PYTHON SCRIPT IS IN # # 3. RUN THIS PYTHON SCRIPT IN THE TERMINAL # Make sure to run Python in 32-bit mode # # 4. MOVE YOUR CURSOR TO THE WINDOW AND PRESS THE STYLUS HARD ON THE TABLET # You should see the pressure value change accordingly # tablet = ctypes.cdll.LoadLibrary('./libTabletLibC.so') # you might want to wrap this in some class tablet.get_point_x.restype = ctypes.c_longlong tablet.get_point_y.restype = ctypes.c_longlong tablet.get_point_z.restype = ctypes.c_longlong tablet.get_buttons.restype = ctypes.c_longlong tablet.get_tilt_x.restype = ctypes.c_double tablet.get_tilt_y.restype = ctypes.c_double tablet.get_rotation.restype = ctypes.c_double tablet.get_pressure.restype = ctypes.c_double tablet.get_tangent_pressure.restype = ctypes.c_double tablet.get_vendor1.restype = ctypes.c_longlong tablet.get_vendor2.restype = ctypes.c_longlong tablet.get_vendor3.restype = ctypes.c_longlong tablet.get_vendor_id.restype = ctypes.c_longlong tablet.get_tablet_id.restype = ctypes.c_longlong tablet.get_pointer_id.restype = ctypes.c_longlong tablet.get_device_id.restype = ctypes.c_longlong tablet.get_system_tablet_id.restype = ctypes.c_longlong tablet.get_vendor_pointer_type.restype = ctypes.c_longlong tablet.get_vendor_pointer_serial_number.restype = ctypes.c_longlong tablet.get_vendor_unique_id.restype = ctypes.c_longlong tablet.get_capability_mask.restype = ctypes.c_longlong tablet.get_pointer_type.restype = ctypes.c_longlong tablet.get_enter_proximity.restype = ctypes.c_longlong class MyFrame(wx.Frame): def __init__(self, parent, title): wx.Frame.__init__(self, parent, title=title, size=(500,500)) self.textBox = wx.TextCtrl(self, style=wx.TE_MULTILINE) self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouseEvent) self.textBox.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouseEvent) self.Show(True) def OnMouseEvent(self, event): self.textBox.AppendText("x: " + str(tablet.get_point_x()) + " y: " + str(tablet.get_point_y()) + " p: " + str(tablet.get_pressure()) + "\n") print "start: " + str(tablet.init()) # non-zero means error app = wx.App(False) frame = MyFrame(None, 'Test') app.MainLoop() print "end: " + str(tablet.stop()) # non-zero means error ```
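As the inline comment suggests ("you might want to wrap this in some class"), the restype boilerplate can be folded into a small wrapper. A sketch under the same assumptions as the script above (the compiled libTabletLibC.so sits next to the script and exports the functions listed there):

```python
import ctypes


class Tablet(object):
    """Thin wrapper around libTabletLibC.so that sets up return types once."""

    _longs = ["get_point_x", "get_point_y", "get_point_z", "get_buttons",
              "get_vendor_id", "get_tablet_id", "get_pointer_id", "get_device_id"]
    _doubles = ["get_tilt_x", "get_tilt_y", "get_rotation",
                "get_pressure", "get_tangent_pressure"]

    def __init__(self, path="./libTabletLibC.so"):
        self._lib = ctypes.cdll.LoadLibrary(path)
        for name in self._longs:
            getattr(self._lib, name).restype = ctypes.c_longlong
        for name in self._doubles:
            getattr(self._lib, name).restype = ctypes.c_double

    def __enter__(self):
        if self._lib.init() != 0:   # non-zero means error, as noted in the script
            raise RuntimeError("Tablet init failed")
        return self

    def __exit__(self, *exc):
        self._lib.stop()

    def position(self):
        return self._lib.get_point_x(), self._lib.get_point_y()

    def pressure(self):
        return self._lib.get_pressure()
```

Used as a context manager, this replaces the module-level init/stop calls: `with Tablet() as t: print(t.position(), t.pressure())`.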
{ "source": "jenskutilek/RoboChrome", "score": 2 }
#### File: RoboChrome.roboFontExt/lib/RoboChrome.py ```python from AppKit import NSColor, NSCalibratedRGBColorSpace # from AppKit import NSAttributedString, NSForegroundColorAttributeName # for popup listbox cells from defconAppKit.windows.baseWindow import BaseWindowController from mojo.events import addObserver, removeObserver from mojo.drawingTools import drawGlyph, fill, line, rect, restore, save, scale, stroke, strokeWidth, translate from mojo.UI import UpdateCurrentGlyphView, CurrentGlyphWindow # from mojo.UI import setGlyphViewDisplaySettings, getGlyphViewDisplaySettings from os.path import basename, exists from re import compile from RoboChromeUI import get_ui, get_drawer from colorfont import ColorFont class ColorFontEditor(BaseWindowController): def __init__(self): self.libkey = "com.fontfont.colorfont" self._font = None self.show_only_glyphs_with_layers = True self.color = NSColor.blackColor() self.colorbg = NSColor.whiteColor() self._selected_color_index = None # live update the canvas when glyphs are edited self._debug_enable_live_editing = True self._auto_layer_regex_ok = True # self.oldDisplaySettings = getGlyphViewDisplaySettings() # setGlyphViewDisplaySettings({"On Curve Points": False, "Off Curve Points": False}) self.w = get_ui(self, "None") self.d = get_drawer(self) self.setUpBaseWindowBehavior() # self._callback_ui_glyph_list_selection() addObserver(self, "_observer_glyph_changed", "currentGlyphChanged") addObserver(self, "_observer_draw_glyph_window", "drawBackground") addObserver(self, "_observer_draw_glyph_window", "drawInactive") addObserver(self, "_observer_font_will_close", "fontWillClose") addObserver(self, "_observer_font_did_open", "fontDidOpen") # grey out controls that are not implemented yet self.d.generateGoogleFormat.enable(False) self.d.preferPlacedImages.enable(False) # disable regex check box, because it is read only self.d.auto_layer_regex_ok.enable(False) # If sbix or cbdt is inactive, disable bitmap sizes box # self._check_bitmap_ui_active() self.w.open() self.font = CurrentFont() # self._update_ui() # if CurrentGlyph() is not None: # self.glyphPreview = CurrentGlyph().name # if self.glyphPreview in self.cfont.keys(): # self.layer_glyphs_glyph_window = self.cfont[self.glyphPreview].layers # self._cache_color_info_glyph_window() # UpdateCurrentGlyphView() @property def font(self): return self._font @font.setter def font(self, value): self._font = value self.cfont = ColorFont(self._font) self.glyph = None self.glyphPreview = None self.width = 0 self.palette_index = 0 # currently active palette self.currentPaletteChanged = False self.currentGlyphChanged = False self.layer_glyphs = [] self.layer_colors = [] self.layer_glyphs_glyph_window = [] self.layer_colors_glyph_window = [] if self._font is not None: self.metrics = ( self._font.info.descender, self._font.info.xHeight, self._font.info.capHeight, self._font.info.ascender, self._font.info.unitsPerEm, ) else: self.metrics = (-200, 500, 700, 800, 1000) self.scale = 180 / (self.metrics[3] - self.metrics[0]) # load color data from rfont if self._font: self.cfont.read_from_rfont() self._update_ui() UpdateCurrentGlyphView() def _update_ui(self): # update ui self._callback_update_ui_formats() self.d.generate_sbix_sizes.set(self._ui_get_sbix_sizes()) self.d.auto_layer_regex_box.set(self.cfont.auto_layer_regex) self.d._add_base_layer.set(self.cfont.auto_layer_include_baseglyph) self.d.preferPlacedImages.set(self.cfont.prefer_placed_images) self._ui_update_palette_chooser() 
self._ui_update_palette(self.palette_index) self._callback_update_ui_glyph_list() if len(self.cfont) > 0: self.w.glyph_list.setSelection([0]) if len(self.cfont) > 0: self.w.auto_layer_button.enable(False) if self._font: title = basename(self._font.fileName) else: title = "None" self.w.setTitle("%s – RoboChrome" % title) # def _getColorPopupList(self): # # build a list of current palette's colors that can be assigned # # to the color popup in the layer list # # FIXME: It seems the cell's popup list can't be changed easily after # # the list has been built. # return [NSAttributedString.alloc().initWithString_attributes_(str(entry["layer_index"]), {NSForegroundColorAttributeName: entry["Color"]}) for entry in self.w.colorpalette] def _show_font_info(self, sender=None): print(self.cfont) def _choose_file_to_import(self, sender=None): self.showGetFile( [ "public.opentype-font", "public.truetype-ttf-font" ], self._import_from_font ) def _import_from_font(self, file_paths=None): if file_paths is not None: self.cfont = ColorFont(CurrentFont()) self.cfont.import_from_otf(file_paths[0]) self.cfont.save_to_rfont() self.cfont.save_all_glyphs_to_rfont() self._ui_update_palette_chooser() self._ui_update_palette(self.palette_index) self._callback_update_ui_glyph_list() if len(self.cfont) > 0: self.w.glyph_list.setSelection([0]) def _choose_file_to_export(self, sender=None): pathkey = "com.typemytype.robofont.compileSettings.path" _font = -1 if pathkey in self.font.lib: _font = self.font.lib.get(pathkey) if not exists(_font): _font = -1 if _font == -1: self.showPutFile( [ "public.opentype-font", "public.truetype-ttf-font" ], self._export_to_font ) else: self._export_to_font(_font) def _export_to_font(self, file_path=None): print("_export_to_font", file_path) if file_path is not None: if len(self.cfont.palettes[0]) > 0: print("Exporting to", file_path) self.cfont.export_to_otf( file_path, palette_index=self.palette_index, parent_window=self.w, ) else: print("ERROR: No color data in UFO.") def _choose_png_to_export(self, sender=None): self.showPutFile( ["public.png"], self._save_png, "%s.png" % self.glyph ) def _save_png(self, png_path=None): # save current glyph as PNG if png_path is not None: self.cfont.export_png( self.glyph, png_path, self.palette_index, self.font.info.unitsPerEm ) def _ui_update_layer_list(self): # set layer UI for current glyph _ui_list = [] if self.glyph in self.cfont.keys(): for i in range(len(self.cfont[self.glyph].layers)): g = self.cfont[self.glyph].layers[i] if g in self.font.keys(): _ui_list.append({ "layer_index": str(i), "layer_color_index": self.cfont[self.glyph].colors[i], # "Color": self._getColorPopupList(), "Layer Glyph": g, }) else: print("Warning: Missing layer glyph '%s' referenced in glyph '%s'." 
% (g, self.glyph)) # print("DEBUG: self.w.layer_list.set(_ui_list)") self.w.layer_list.set(_ui_list) # cache for faster drawing self._cache_layer_info() self._cache_color_info() def _ui_layer_list_save_to_cfont(self): # save the ui layer list to colorfont # print("DEBUG ColorFontEditor._ui_layer_list_save_to_cfont") if self.glyph is not None: if self.glyph in self.cfont.keys(): layerGlyphs = [] _layer_colors = [] for layerDict in sorted( self.w.layer_list.get(), key=lambda k: int(k["layer_index"]) ): layerGlyphs.append(layerDict["Layer Glyph"]) _layer_colors.append(int(layerDict["layer_color_index"])) if len(layerGlyphs) > 0 or len(_layer_colors) > 0: _modified = False if self.cfont[self.glyph].layers != layerGlyphs: self.cfont[self.glyph].layers = layerGlyphs _modified = True if self.cfont[self.glyph].colors != _layer_colors: self.cfont[self.glyph].colors = _layer_colors _modified = True if _modified: self.cfont.save_glyph_to_rfont(self.glyph) else: # empty layers, delete from lib # print("DEBUG Delete info for glyph", self.glyph) del self.cfont[self.glyph] self.cfont.save_glyph_to_rfont(self.glyph) # else: # print(" Glyph is not in ColorFont, not saving:", self.glyph) # else: # print(" Glyph is None.") def _ui_get_sbix_sizes(self): # get the display string for a python list return str(self.cfont.bitmap_sizes).strip("[]") def _reset_color_data(self, sender=None): # completely remove color info from UFO if self.font is not None: # font lib if "%s.colorpalette" % self.libkey in self.font.lib.keys(): del self.font.lib["%s.colorpalette" % self.libkey] if "%s.color" % self.libkey in self.font.lib.keys(): del self.font.lib["%s.color" % self.libkey] if "%s.colorbg" % self.libkey in self.font.lib.keys(): del self.font.lib["%s.colorbg" % self.libkey] # glyph lib for g in self.font: if "%s.layers" % self.libkey in g.lib.keys(): del g.lib["%s.layers" % self.libkey] self.layer_glyphs = [] self.layer_colors = [] self.font.update() self.cfont = ColorFont(self.font) self.color = self.getNSColor(self.cfont.color) self.colorbg = self.getNSColor(self.cfont.colorbg) # Reset UI self.w.colorpalette.set([{"Index": 0xffff, "Color": self.color}]) self._callback_update_ui_glyph_list() self._callback_ui_glyph_list_selection() self.d.generate_sbix_sizes.set(self._ui_get_sbix_sizes()) self.d.auto_layer_regex_box.set(self.cfont.auto_layer_regex) self.w.auto_layer_button.enable(True) self._callback_update_ui_formats() def addColorToPalette(self, sender=None): # add a new color to the current palette # find a new palette index paletteIndices = sorted( self.cfont.palettes[0].keys(), key=lambda k: int(k) ) if len(paletteIndices) > 0: newIndex = int(paletteIndices[-1]) + 1 else: newIndex = 0 if newIndex < 0xffff: # add new color to current palette self.w.colorpalette.append( { "Index": newIndex, "Color": NSColor.yellowColor() } ) # add new color to all other palettes for p in self.cfont.palettes: p[newIndex] = "#ffff0bff" self.cfont.save_settings = True self._paletteWriteToColorFont() self.currentPaletteChanged = False else: print("ERROR: Color Index 0xffff is reserved.") def _get_palette_color_ui_index_for_layer_color_index(self, index): # find the index of a color in the palette # (= ui list index, not layer_color_index) _palette = self.w.colorpalette.get() for i in range(len(_palette)): if int(_palette[i]["Index"]) == index: return i return None def _cache_layer_info(self): # self.layer_glyphs is used for drawing _layers = sorted( self.w.layer_list.get(), key=lambda k: int(k["layer_index"]) ) if _layers == []: 
self.layer_glyphs = [{ "layer_color_index": 0xffff, # "Color": self._getColorPopupList(), "layer_index": 0, "Layer Glyph": self.glyph }] else: self.layer_glyphs = _layers def _cache_color_info(self): # print("DEBUG _cache_color_info") # write colors for current glyph to self.layer_colors # for faster drawing colorDict = self.getColorDict() _layer_colors = [] for g in self.layer_glyphs: colorIndex = int(g["layer_color_index"]) if colorIndex == 0xffff: _layer_colors.append(self.color) else: if colorIndex in colorDict.keys(): _layer_colors.append(colorDict[colorIndex]) else: print("Missing color index in palette %i: %i (in glyph /%s)" % ( self.palette_index, colorIndex, g["Layer Glyph"] )) self.layer_colors = _layer_colors # update color list in layer list popup # self.w.layer_list["Color"].set(self._getColorPopupList()) def _cache_color_info_glyph_window(self): # print("DEBUG _cache_color_info_glyph_window") # write colors for current glyph to self.layer_colors_glyph_window for faster drawing _layer_colors = [] if self.glyphPreview is not None and self.glyphPreview in self.cfont.keys(): colorDict = self.getColorDict() for colorIndex in self.cfont[self.glyphPreview].colors: if colorIndex == 0xffff: _layer_colors.append(self.color) else: if colorIndex in colorDict.keys(): _layer_colors.append(colorDict[colorIndex]) else: print("Missing color in palette %i: %i (in glyph /%s)" % ( self.palette_index, colorIndex, self.glyphPreview )) self.layer_colors_glyph_window = _layer_colors # layer callbacks def _callback_layer_add(self, sender): if self.glyph is not None: self.cfont.save_settings = True if CurrentGlyph() is not None: newlayer = CurrentGlyph().name else: newlayer = self.glyph _color = self.getSelectedColorIndex() if _color is None: _color = 0xffff self.w.layer_list.append({ "layer_index": str(len(self.w.layer_list)+1), # "Color": self._getColorPopupList(), "layer_color_index": _color, "Layer Glyph": newlayer, }) # self._ui_layer_list_save_to_cfont() if self.glyph not in self.cfont.keys(): # print("DEBUG: Add new layer glyph to cfont") self.cfont.add_glyph(self.glyph) # self._ui_layer_list_save_to_cfont() sel = self.w.glyph_list.getSelection() self._callback_update_ui_glyph_list() self.w.glyph_list.setSelection(sel) def _callback_layer_edit(self, sender=None): # editing a layer (= change color index or glyph name or z-index) # print("DEBUG: _callback_layer_edit") # print(" Sender:", sender.get()) self._cache_layer_info() self._cache_color_info() self.w.preview.update() def _callback_layer_select(self, sender): # a layer has been selected in the layers list. Select corresponding color in the palette. 
sel = sender.getSelection() layers = sender.get() if sel == []: self.w.colorpalette.setSelection([]) self._selected_color_index = None else: i = int(layers[sel[0]]["layer_color_index"]) self._selected_color_index = self._get_palette_color_ui_index_for_layer_color_index(i) if self._selected_color_index is None: self.w.colorpalette.setSelection([]) else: self.w.colorpalette.setSelection([self._selected_color_index]) self.w.preview.update() """example dropinfo: Something was dropped on the layer list: {'rowIndex': 0, 'source': <objective-c class NSColorPanel at 0x7fff72b29ef0>, 'data': { "$archiver" = NSKeyedArchiver; "$objects" = ( "$null", { "$class" = "<CFKeyedArchiverUID 0x608000c2d220 [0x7fff70e30f00]>{value = 2}"; NSColorSpace = 1; NSRGB = <30203020 3000>; }, { "$classes" = ( NSColor, NSObject ); "$classname" = NSColor; } ); "$top" = { root = "<CFKeyedArchiverUID 0x608000c33260 [0x7fff70e30f00]>{value = 1}"; }; "$version" = 100000; }, 'dropOnRow': False, 'isProposal': True} """ def _callback_layer_drop(self, sender=None, dropInfo=None): # a color has been dropped on the layer list if dropInfo["isProposal"]: # TODO: check if drop is acceptable return True else: print("DEBUG: dropped color on row %i" % dropInfo["rowIndex"]) # TODO: accept the drop (actually do something) return True def _choose_svg_to_import(self, sender=None): self.showGetFile( ["public.svg-image"], self._layer_add_svg_from_file ) def _layer_add_svg_from_file(self, file_paths=None): # Add an SVG from external file if file_paths is not None: self.cfont.add_svg(self.glyph, file_paths[0]) sel = self.w.glyph_list.getSelection() self._callback_update_ui_glyph_list() self.w.glyph_list.setSelection(sel) def getNSColor(self, hexrgba): # get NSColor for a HTML-style hex rgb(a) color r = int(hexrgba[1:3], 16) / 255 g = int(hexrgba[3:5], 16) / 255 b = int(hexrgba[5:7], 16) / 255 if len(hexrgba) == 9: a = int(hexrgba[7:9], 16) / 255 else: a = 1 return NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, a) def getHexColor(self, nscolor): if nscolor.colorSpaceName != NSCalibratedRGBColorSpace: nscolor = nscolor.colorUsingColorSpaceName_(NSCalibratedRGBColorSpace) r = int(round(255 * float(nscolor.redComponent()))) g = int(round(255 * float(nscolor.greenComponent()))) b = int(round(255 * float(nscolor.blueComponent()))) a = int(round(255 * float(nscolor.alphaComponent()))) if a == 1: return "#%02x%02x%02x" % (r, g, b) else: return "#%02x%02x%02x%02x" % (r, g, b, a) def _callback_set_show_only_glyphs_with_layers(self, sender): self.show_only_glyphs_with_layers = sender.get() self._callback_update_ui_glyph_list() def _callback_toggle_settings(self, sender): # show or hide the settings drawer self.d.toggle() def _callback_set_sbix_sizes(self, sender): sizes_str = sender.get().split(",") sizes = [] for entry in sizes_str: entry = entry.strip("[], ") if entry != "": sizes.append(int(entry)) self.cfont.bitmap_sizes = sizes def _callback_color_changed_foreground(self, sender): if sender is not None: self.color = sender.get() self._ui_update_palette(self.palette_index) i = self.w.colorpalette.getSelection() if i != []: if int(self.w.colorpalette.get()[i[0]]["Index"]) == 0xffff: self.w.colorPaletteColorChooser.set(self.color) self._cache_color_info() self.w.preview.update() def _callback_color_changed_background(self, sender): if sender is not None: self.colorbg = sender.get() self._cache_color_info() self.w.preview.update() def _callback_color_select_in_palette(self, sender): # a color has been selected in the current palette i = 
sender.getSelection() if not i: # empty selection self.w.colorPaletteColorChooser.enable(False) else: sel = sender.get() selIndex = int(sel[i[0]]["Index"]) if selIndex == 0xffff: # use foreground color self.w.colorPaletteColorChooser.set(self.w.colorChooser.get()) self.w.colorPaletteColorChooser.enable(False) else: self.w.colorPaletteColorChooser.set(sel[i[0]]["Color"]) self.w.colorPaletteColorChooser.enable(True) def _callback_delete_from_palette(self, sender): print("_callback_delete_from_palette") sel = sender.getSelection() if sel == []: return i = sel[0] edited_entry = sender.get()[i] color_index = edited_entry["Index"] if color_index == 0xffff: print("Can't delete foreground color") return del sender[i] # Delete entry from all palettes self.cfont.remove_from_palettes(color_index) def _ui_update_palette_chooser(self): self.w.paletteswitch.setItems(["Palette %s" % i for i in range(len(self.cfont.palettes))]) def paletteEdit(self, sender): # What is this supposed to do? # It may allow the palettes to go out of sync when the color index is # edited in one palette, but not the other ones. pass # print(sender) # print("paletteEdit: Not implemented") # sel = sender.getSelection() # print(sel) # if sel != []: # i = sel[0] # # TODO: Sync changes to all other palettes # if i < len(self.w.colorpalette): # edited_entry = sender.get()[i] # if self.w.colorpalette[i] != sender.get()[i]: # self.w.colorpalette[i] = sender.get()[i] # self._paletteWriteToColorFont() # self.currentPaletteChanged = False # print(" Palette changed") # else: # # Something # print("Ignored edit of foreground color") # self.w.preview.update() def paletteEditColorCell(self, sender): # double-click on a color cell in the palette pass # print(sender) # print("paletteEditColorCell: Not implemented") def _paletteWriteToColorFont(self): # print("DEBUG _paletteWriteToColorFont") # make a dict for active palette and write it to self.cfont.palettes _dict = {} for _color in sorted( self.w.colorpalette.get(), key=lambda _key: int(_key["Index"]) ): color_index = int(_color["Index"]) if color_index != 0xffff: _dict[color_index] = self.getHexColor(_color["Color"]) self.cfont.palettes[self.palette_index] = _dict self.cfont.save_to_rfont() def _paletteSwitchCallback(self, sender): # activate a different palette # save current palette if self.currentPaletteChanged: self._paletteWriteToColorFont() self._ui_update_palette(sender.get()) # print("DEBUG Active Palette is now #%i" % self.palette_index) def paletteDuplicate(self, sender): sp = self.w.paletteswitch.get() if sp < len(self.cfont.palettes) and sp >= 0: print("Duplicate palette %i ..." 
% sp) self._paletteWriteToColorFont() colorpalette = self.cfont.palettes[sp].copy() else: colorpalette = {} self.cfont.palettes.append(colorpalette) self._ui_update_palette_chooser() # new palette should be active self._ui_update_palette(len(self.cfont.palettes) - 1) self._paletteWriteToColorFont() self.currentPaletteChanged = False def _ui_update_palette(self, palette_index): # load a different palette from the color font and show it in UI # save the currently selected color index selectedColorIndex = self.w.colorpalette.getSelection() self.palette_index = palette_index if self.palette_index < len(self.cfont.palettes): colorpalette = self.cfont.palettes[self.palette_index] else: colorpalette = {} int_keys = [int(k) for k in colorpalette.keys()] newColorpalette = [ { "Index": k, "Color": self.getNSColor(colorpalette[k]) } for k in sorted(int_keys) ] newColorpalette.append({"Index": 0xffff, "Color": self.color}) self.w.colorpalette.set(newColorpalette) self.w.colorpalette.setSelection(selectedColorIndex) self.w.paletteswitch.set(self.palette_index) self._cache_color_info() self._cache_color_info_glyph_window() self.currentPaletteChanged = False self.w.preview.update() UpdateCurrentGlyphView() def paletteDelete(self, sender): pass def getSelectedColorIndex(self): i = self.w.colorpalette.getSelection() if i == []: return None else: return self.w.colorpalette.get()[i[0]]["Index"] def _callback_color_changed_layer(self, sender): # a color has been changed in the palette # by editing the color well below it if sender is not None: _selected_color = self.w.colorpalette.getSelection() if _selected_color != []: _colors = self.w.colorpalette.get() _colors[_selected_color[0]]["Color"] = sender.get() self.w.colorpalette.set(_colors) self._paletteWriteToColorFont() self.currentPaletteChanged = False self._cache_color_info() self.w.preview.update() def windowCloseCallback(self, sender): # print("DEBUG windowCloseCallback") removeObserver(self, "fontDidOpen") removeObserver(self, "fontWillClose") removeObserver(self, "currentGlyphChanged") removeObserver(self, "drawBackground") removeObserver(self, "drawInactive") # setGlyphViewDisplaySettings(self.oldDisplaySettings) self._ui_layer_list_save_to_cfont() if self.cfont.save_settings and self.currentPaletteChanged: self._paletteWriteToColorFont() if self.cfont.save_settings: self.cfont.save_to_rfont() super(ColorFontEditor, self).windowCloseCallback(sender) def _callback_ui_glyph_list_selection(self, sender=None): # selection changed in the ui glyph list if self.glyph is not None: # save current glyph layers self._ui_layer_list_save_to_cfont() sel = self.w.glyph_list.getSelection() if sel == []: self.w.layer_list.set([]) self.glyph = None self.width = 0 self.w.add_layer_button.enable(False) else: self.glyph = self.w.glyph_list[sel[0]]["Name"] self.width = self.font[self.glyph].width self._ui_update_layer_list() self.w.add_layer_button.enable(True) self.w.preview.update() def _callback_update_ui_glyph_list(self, sender=None): _match = self.w.glyph_list_search_box.get() # print("DEBUG: _callback_update_ui_glyph_list") glyphlist = [] if self.font is not None: # if no color glyphs in font, show all glyphs in list if len(self.cfont.keys()) == 0: self.show_only_glyphs_with_layers = False self.w.show_only_glyphs_with_layers.set(False) self.w.show_only_glyphs_with_layers.enable(False) else: self.w.show_only_glyphs_with_layers.enable(True) cfglyphs = self.cfont.keys() for n in self.font.glyphOrder: if n in cfglyphs: _glyph_has_layers = True else: _glyph_has_layers = 
False if not self.show_only_glyphs_with_layers or _glyph_has_layers: if _match == "": glyphlist.append( { "Layers": _glyph_has_layers, "Name": n } ) else: if _match in n: glyphlist.append( { "Layers": _glyph_has_layers, "Name": n } ) self.w.glyph_list.set(glyphlist) if glyphlist != []: self.w.glyph_list.setSelection([0]) def _callback_update_ui_formats(self, sender=None): self.d.generateMSFormat.set(self.cfont.write_colr) self.d.generateAppleFormat.set(self.cfont.write_sbix) self.d.generateSVGFormat.set(self.cfont.write_svg) self.d.generateGoogleFormat.set(self.cfont.write_cbdt) self._check_bitmap_ui_active() self._check_export_ui_active() def _callback_select_formats(self, sender=None): self.cfont.write_colr = self.d.generateMSFormat.get() self.cfont.write_sbix = self.d.generateAppleFormat.get() self.cfont.write_svg = self.d.generateSVGFormat.get() self.cfont.write_cbdt = self.d.generateGoogleFormat.get() self._check_bitmap_ui_active() self._check_export_ui_active() def _check_bitmap_ui_active(self): _ui_active = self.cfont.write_sbix or self.cfont.write_cbdt self.d.generate_sbix_sizes.enable(_ui_active) self.d.preferPlacedImages.enable(_ui_active) def _check_export_ui_active(self): _ui_active = self.cfont.write_sbix or self.cfont.write_cbdt or self.cfont.write_colr or self.cfont.write_svg self.w.export_button.enable(_ui_active) def setFill(self, nscolor, opacity_factor=1): # set fill color for mojoDrawingTools, optionally with changed opacity if nscolor.colorSpaceName != NSCalibratedRGBColorSpace: nscolor = nscolor.colorUsingColorSpaceName_(NSCalibratedRGBColorSpace) fill(nscolor.redComponent(), nscolor.greenComponent(), nscolor.blueComponent(), nscolor.alphaComponent() * opacity_factor) def getColorDict(self): # returns the current UI color palette as dictionary {index: nscolor} return {int(_color["Index"]): _color["Color"] for _color in self.w.colorpalette.get()} def draw(self): # draw the color glyph on the canvas if self._font is not None: save() self.setFill(self.colorbg) rect(0, 0, 310, 200) self.setFill(self.color) scale(self.scale) translate(50.5, -self.metrics[0]+20.5) self._canvas_draw_metrics() for i in range(len(self.layer_glyphs)): layerGlyph = self.layer_glyphs[i]["Layer Glyph"] if self._selected_color_index is None: op_factor = 1.0 else: if self._selected_color_index == self.layer_glyphs[i]["layer_color_index"]: op_factor = 1.0 else: op_factor = 0.2 if layerGlyph in self.font: if i < len(self.layer_colors): _color = self.layer_colors[i] self.setFill(_color, op_factor) drawGlyph(self.font[layerGlyph]) restore() def _canvas_draw_metrics(self): save() strokeWidth(1 / self.scale) stroke(0.8, 0.8, 0.8) line((0, 0), (self.width, 0)) line((0, self.metrics[0]), (self.width, self.metrics[0])) line((0, self.metrics[1]), (self.width, self.metrics[1])) line((0, self.metrics[2]), (self.width, self.metrics[2])) line((0, self.metrics[3]), (self.width, self.metrics[3])) line((0, self.metrics[3]), (0, self.metrics[0])) line((self.width, self.metrics[3]), (self.width, self.metrics[0])) restore() def _observer_font_did_open(self, info=None): if self.font is None: self.font = info['font'] self._update_ui() def _observer_font_will_close(self, info=None): # When a font closes, save color font information # if it is the currently active font in RoboChrome. 
font_to_close = info['font'] if self.font == font_to_close: self._ui_layer_list_save_to_cfont() if self.cfont.save_settings and self.currentPaletteChanged: self._paletteWriteToColorFont() if self.cfont.save_settings: self.cfont.save_to_rfont() self.font = None self._update_ui() def _observer_glyph_changed(self, info=None): # Current Glyph has changed if info is not None: if info["glyph"] is not None: self.glyphPreview = CurrentGlyph().name if self.glyphPreview in self.cfont.keys(): self.layer_glyphs_glyph_window = self.cfont[self.glyphPreview].layers self._cache_color_info_glyph_window() UpdateCurrentGlyphView() def _callback_goto_glyph(self, sender=None): newGlyphName = sender.get()[sender.getSelection()[0]]["Name"] if CurrentGlyphWindow(): CurrentGlyphWindow().setGlyphByName(newGlyphName) else: # TODO: open glyph window? pass def _callback_select_glyphs_in_font_window(self, sender=None): # select all glyphs which have layers. self.font.selection = self.cfont.keys() def _observer_draw_glyph_window(self, info): # draw the color glyph in the glyph window # print("DEBUG: _observer_draw_glyph_window") if self.glyphPreview in self.cfont.keys(): # print("DEBUG: draw glyph") save() self.setFill(self.color) for i in range(len(self.layer_glyphs_glyph_window)): layerGlyph = self.layer_glyphs_glyph_window[i] if layerGlyph in self.font: if i < len(self.layer_colors_glyph_window): _color = self.layer_colors_glyph_window[i] self.setFill(_color) drawGlyph(self.font[layerGlyph]) restore() if self._debug_enable_live_editing: self.w.preview.update() def _callback_auto_layers(self, sender=None): print("Auto layers: %s" % self.cfont.auto_layer_regex) if self.cfont.auto_layer_regex is not None: self.cfont.auto_layers() self.cfont.auto_palette() self.cfont.save_to_rfont() self.cfont.save_all_glyphs_to_rfont() self._ui_update_palette_chooser() self._ui_update_palette(self.palette_index) self._ui_update_layer_list() self._callback_update_ui_glyph_list() if len(self.cfont) > 0: self.w.glyph_list.setSelection([0]) self.w.auto_layer_button.enable(False) else: print("ERROR: Invalid auto layer regex") def _callback_auto_palette(self, sender=None): self.cfont.auto_palette() self.cfont.save_to_rfont() self._ui_update_palette_chooser() self._ui_update_palette(self.palette_index) def _callback_check_regex(self, sender=None): # check if the entered regex does compile test_re = sender.get() try: compile(test_re) self.d.auto_layer_regex_ok.set(True) self.w.auto_layer_button.enable(True) self.cfont.auto_layer_regex = test_re self.d.regex_test_button.enable(True) except: self.d.auto_layer_regex_ok.set(False) self.w.auto_layer_button.enable(False) self.cfont.auto_layer_regex = None self.d.regex_test_button.enable(False) def _callback_test_regex(self, sender=None): # select glyphs based on current regex regex = compile(self.cfont.auto_layer_regex) _glyph_list = [glyphname for glyphname in self.font.glyphOrder if regex.search(glyphname)] # print("_callback_test_regex matched %i glyphs." % len(_glyph_list)) self.font.selection = _glyph_list def _callback_prefer_placed_images(self, sender=None): self.cfont.prefer_placed_images = sender.get() def _callback_auto_layer_include_baseglyph(self, sender=None): self.cfont.auto_layer_include_baseglyph = sender.get() if __name__ == "__main__": OpenWindow(ColorFontEditor) ```
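The editor above is only the UI layer over the ColorFont class in colorfont.py, which is not shown here. A rough scripting sketch that sticks to methods and attributes the UI code itself calls; the regex and output path are placeholders, and the export keyword usage is inferred from the UI call:

```python
from colorfont import ColorFont

font = CurrentFont()              # RoboFont scripting environment
cfont = ColorFont(font)
cfont.read_from_rfont()           # load existing color data from the UFO lib

# Mirror the "auto layer" button: build layers from a glyph-name pattern,
# then derive a palette from the layer colors.
cfont.auto_layer_regex = r"\.layer[0-9]+$"   # placeholder pattern
cfont.auto_layers()
cfont.auto_palette()
cfont.save_to_rfont()
cfont.save_all_glyphs_to_rfont()

# Export the first palette as a COLR/CPAL font.
cfont.write_colr = True
# The UI also passes parent_window here; assumed optional for a headless script.
cfont.export_to_otf("/tmp/MyColorFont.otf", palette_index=0)
```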
{ "source": "jenskutilek/RoboFont", "score": 2 }
#### File: GlyphRotator.roboFontExt/lib/GlyphPreviewRotate.py ```python from __future__ import absolute_import, division, print_function # Advanced GlyphProcessor from vanilla import FloatingWindow, Slider from mojo.glyphPreview import GlyphPreview from mojo.roboFont import version as roboFontVersion from GlyphProcessor import GlyphProcessorUI from fontTools.misc.transform import Identity class RotatedGlyphPreview(GlyphProcessorUI): def __init__(self): self.extensionID = self.getExtensionID() self._initSettings() self._loadSettings() self.w = self._buildUI() self._addObservers() self.setUpBaseWindowBehavior() self._currentGlyphChangedObserver() self.w.open() def _getExtensionID(self): return "com.netzallee.rotatedGlyphPreview" def _buildUI(self): w = FloatingWindow((300, 340), "Rotated Glyph Preview", (300, 340), (2560, 1440)) w.preview = GlyphPreview((2, 2, -2, -40)) w.rotationSlider = Slider((10, -30, -10, 20), minValue=-180, maxValue=180, value=self.settings["rotation"], tickMarkCount=5, stopOnTickMarks=False, continuous=True, callback=self._setRotation, sizeStyle="small", ) return w def _getObservers(self): return { "draw": ["_currentGlyphChangedObserver",], "currentGlyphChanged": ["_currentGlyphChangedObserver",], } def _currentGlyphChangedObserver(self, info=None): if CurrentFont() is not None: self._scale = 1000 / CurrentFont().info.unitsPerEm self._y = (CurrentFont().info.ascender + CurrentFont().info.descender) / 2 * self._scale else: self._scale = 1 self._y = 500 self._draw() def _setRotation(self, sender): _rotation = sender.get() if 87 <= _rotation <= 93: _rotation = 90 elif -93 <= _rotation <= -87: _rotation = -90 elif -3 <= _rotation <= 3: _rotation = 0 self.settings["rotation"] = _rotation self._draw() def _deepAppendGlyph(self, glyph, gToAppend, font, offset=(0, 0)): if not gToAppend.components: glyph.appendGlyph(gToAppend, offset) else: for component in gToAppend.components: if component.baseGlyph not in font.keys(): # avoid traceback in the case where the selected glyph # is referencing a component whose glyph is not in the font continue compGlyph = font[component.baseGlyph].copy() if component.transformation != (1, 0, 0, 1, 0, 0): # if component is skewed and/or is shifted: matrix = component.transformation[0:4] if matrix != (1, 0, 0, 1): # if component is skewed transformObj = Identity.transform(matrix + (0, 0)) # ignore the original component's shifting values compGlyph.transform(transformObj) # add the two tuples of offset: totalOffset = tuple(map(sum, zip(component.offset, offset))) glyph.appendGlyph(compGlyph, totalOffset) for contour in gToAppend: glyph.appendContour(contour, offset) # if the assembled glyph still has components, recursively # remove and replace them 1-by-1 by the glyphs they reference: if glyph.components: nestedComponent = glyph.components[-1] glyph.removeComponent(nestedComponent) glyph = self._deepAppendGlyph(glyph, font[nestedComponent.baseGlyph], font, nestedComponent.offset) return glyph def _draw(self): cG = CurrentGlyph() if cG is not None: self.previewGlyph = self._deepAppendGlyph(RGlyph(), cG, CurrentFont()) self.previewGlyph.width = cG.width * self._scale if roboFontVersion >= "2.0b": self.previewGlyph.scaleBy((self._scale, self._scale)) self.previewGlyph.rotateBy(self.settings["rotation"], (self.previewGlyph.width / 2, self._y)) else: self.previewGlyph.scale((self._scale, self._scale)) self.previewGlyph.rotate(self.settings["rotation"], (self.previewGlyph.width / 2, self._y)) else: self.previewGlyph = RGlyph() 
self.w.preview.setGlyph(self.previewGlyph) def _initSettings(self): self.settings = { "rotation": 0, } OpenWindow(RotatedGlyphPreview) ``` #### File: RoboFont/scripts/Histogram.py ```python from math import ceil import vanilla from defconAppKit.windows.baseWindow import BaseWindowController import traceback try: import GlyphsApp env = "glyphs" except: env = "robofont" if env == "robofont": from mojo.canvas import Canvas import mojo.drawingTools as drawing from lib.tools.defaults import getDefault elif env == "glyphs": from robofab.world import CurrentFont from feind.vanilla import Canvas from feind import drawing class UnitizationInfo(object): def __init__(self): self.systems = {} def add_system(self, system): if system.upm in self.systems: self.systems[system.upm].append(system) else: self.systems[system.upm] = [system] def get_systems_by_upm(self, upm): if upm in self.systems: return self.systems[upm] else: return [] def get_all_units(self): return self.systems.keys() def get_ui_list(self): # return a list that can be used in vanilla.List ui_list = ["-"] for upm in sorted(self.systems.keys()): for system in sorted(self.systems[upm]): ui_list.append(system.name) return ui_list def get_system_by_name(self, name): for system_list in self.systems.itervalues(): for system in system_list: if system.name == name: return system return None class UnitSystem(object): def __init__(self, name, units, min_units=None, max_units=None, strategy="free", unit_dict={}): self.name = name self.upm = units self.min_units = min_units self.max_units = max_units # unitization strategies if strategy == "alleq": self.set_all_equal() elif strategy == "fixed": if unit_dict: self.set_fixed_units(unit_dict) else: raise "Must supply unit_dict when using fixed units" elif strategy == "free": self.set_free_units() else: raise "Unknown unitization strategy." def set_fixed_units(self, unit_dict): self.all_equal = False self.fixed_units = unit_dict self.free_units = False def set_all_equal(self, all_equal=True): self.all_equal = all_equal self.fixed_units = {} self.free_units = False def set_free_units(self, free_units=True): self.all_equal = False self.fixed_units = {} self.free_units = free_units # define known unit systems # sources: <http://www.quadibloc.com/comp/propint.htm> # <NAME> (Hrsg.): <NAME>. <NAME>. 
Birkhaeuser 2009 unitization_info = UnitizationInfo() unitization_info.add_system(UnitSystem("Monospaced", 1, 0, 1, "alleq")) unitization_info.add_system(UnitSystem("IBM Executive Typewriter", 5, 2, 5, "fixed", { 2: ["f", "i", "j", "l", "t", "I", "period", "comma", "colon", "semicolon", "quotesingle", "exclam", "parenleft", "parenright", "space"], 3: ["a", "b", "c", "d", "e", "g", "h", "k", "n", "o", "p", "q", "r", "s", "u", "v", "x", "y", "z", "J", "S", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", 'quotedbl', "question", "numbersign", "plus", "hyphen", "asterisk", "slash", "equal"], 4: ["w", "A", "B", "C", "D", "E", "F", "G", "H", "K", "L", "N", "O", "P", "Q", "R", "T", "U", "V", "X", "Y", "Z", "ampersand"], 5: ["m", "W", "M", "at", "percent", "underscore", "onehalf", "onequarter"], })) unitization_info.add_system(UnitSystem("IBM Mag Card Executive (1972)", 7, 3, 7, "fixed", { 3: ["i", "j", "l"], 4: ["f", "t", "I", "quotesingle"], 5: ["a", "c", "e", "h", "k", "n", "o", "r", "s", "u", "v", "x", "z", "J", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "period", "comma", "colon", "semicolon", "exclam", "question", "quotedbl", "at", "numbersign", "dollar", "ampersand", "cent", "parenleft", "parenright", "+", "-", "*", "/", "=", "_", "onehalf", "onequarter", "space"], 6: ["b", "d", "g", "p", "q", "y", "E", "F", "L", "P", "S", "Z"], 7: ["m", "w", "A", "B", "C", "D", "G", "H", "K", "M", "N", "O", "Q", "R", "T", "U", "V", "W", "X", "Y"], })) unitization_info.add_system(UnitSystem("IBM Selectric Composer (1966)", 9, 3, 9, "fixed", { 3: ["i", "j", "l", "period", "comma", "semicolon", "grave", "quotesingle", "hyphen", "space"], 4: ["f", "t", "r", "s", "I", "colon", "exclam", "parenleft", "parenright", "slash"], 5: ["a", "c", "e", "g", "z", "J", "bracketleft"], 6: ["b", "d", "h", "k", "n", "p", "q", "u", "v", "x", "y", "P", "S", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "bracketright", "plus", "asterisk", "equal", "dollar", "dagger"], 7: ["B", "C", "E", "F", "L", "T", "Z"], 8: ["w", "A", "D", "G", "H", "K", "N", "O", "Q", "R", "U", "V", "X", "Y", "ampersand", "at", "percent", "onehalf", "onequarter", "threequarters", "emdash"], 9: ["m", "W", "M"], })) unitization_info.add_system(UnitSystem("Linotype early photo typesetting", 18, 0, 18, "free")) unitization_info.add_system(UnitSystem("Monotype hot metal and Monophoto Filmsetter", 18, 4, 18, "free")) unitization_info.add_system(UnitSystem("Lumitype", 36, 0, None, "free")) unitization_info.add_system(UnitSystem("Berthold photo typesetting", 48, 0, None, "free")) unitization_info.add_system(UnitSystem("Linotype later photo typesetting (3 x 18)", 54, 0, 54, "free")) unitization_info.add_system(UnitSystem("URW photo typesetting", 54, 0, 54, "free")) """ HistogramUI - the main window (RoboFont-specific) """ class HistogramUI(BaseWindowController): def __init__(self): self.histogram_width = 608 self.histogram_height = 380 histogram_x_offset = 10 histogram_y_offset = 10 window_width = 630 window_height = 500 # how much of the em is shown? self.ems_horizontal = 1.3 # how many pixels on the canvas per letter? self.scale_vertical = 10.0 self.units = 18 self.known_units = unitization_info.get_all_units() self.system = None self.show_fixed = True self.glyphs = [] # show glyphs or lines? 
self.show_glyphs = True y = 10 self.w = vanilla.FloatingWindow((window_width, window_height), "Glyph Width Histogram") self.w.histogram = Canvas((histogram_x_offset, histogram_y_offset, -10, self.histogram_height), canvasSize=(self.histogram_width, self.histogram_height), hasHorizontalScroller=False, hasVerticalScroller=False, delegate=self) y += self.histogram_height + histogram_y_offset + 8 self.w.unit_label = vanilla.TextBox((histogram_x_offset, y, 90, 20), "Units per em:") self.w.unit_display = vanilla.TextBox((histogram_x_offset + 90, y, 30, 20), self.units) self.w.system_selection = vanilla.PopUpButton((histogram_x_offset + 120, y-1, 300, 20), [], callback = self._systemCallback, #sizeStyle = "small", ) self.w.show_fixed = vanilla.CheckBox((histogram_x_offset + 462, y-1, 300, 20), "Show system widths", value=self.show_fixed, callback = self._showFixedCallback, ) y += 28 self.w.unit_slider = vanilla.Slider((histogram_x_offset, y, -10, 20), minValue = 1, maxValue = 64, value = self.units, tickMarkCount = 64, callback = self._unitCallback, stopOnTickMarks = True, ) y += 33 self.w.glyph_selection_label = vanilla.TextBox((histogram_x_offset, y, 120, 20), "Analyze glyphs:") self.w.glyph_selection = vanilla.RadioGroup((histogram_x_offset + 110, y, 210, 20), ["Selected", "All", "Charset:"], isVertical = False, callback = self._glyphSelectionCallback, sizeStyle = "small", ) self.w.charset_selection = vanilla.PopUpButton((histogram_x_offset + 320, y, -110, 20), [], callback = self._charsetCallback, #sizeStyle = "small", ) self.w.update_button = vanilla.Button((-100, y, -10, 20), "Update", callback = self._updateButtonCallback ) self._unitCallback() self.w.system_selection.setItems(unitization_info.get_ui_list()) if self.units in self.known_units: self.system = unitization_info.get_systems_by_upm(self.units)[0] self.w.system_selection.set(unitization_info.get_ui_list().index(self.system.name)) else: self.system = None self.w.system_selection.set(0) self.w.glyph_selection.set(0) self.w.charset_selection.enable(False) self.update_charset_selection() self.glyphs = self.get_glyphnames_for_histogram() self.calculate_histogram() self.setUpBaseWindowBehavior() #addObserver(self, "glyphChanged", "currentGlyphChanged") #addObserver(self, "glyphChanged", "draw") #addObserver(self, "removeFontFromList", "fontWillClose") #addObserver(self, "updateFontList", "fontDidOpen") self.w.open() def _unitCallback(self, sender=None): self.units = int(self.w.unit_slider.get()) if sender is not None: if self.units in self.known_units: self.system = unitization_info.get_systems_by_upm(self.units)[0] self.w.system_selection.set(unitization_info.get_ui_list().index(self.system.name)) else: self.system = None self.w.system_selection.set(0) text = "%i" % self.units self.w.unit_display.set(self.units) self._check_fixed() self.w.histogram.update() def _systemCallback(self, sender=None): # a unit system has been selected name = self.w.system_selection.getItems()[sender.get()] if name != "-": self.system = unitization_info.get_system_by_name(name) self.w.unit_slider.set(self.system.upm) self._unitCallback() self._check_fixed() def _check_fixed(self): if self.system is not None: if self.system.fixed_units: self.w.show_fixed.enable(True) else: self.w.show_fixed.enable(False) else: self.w.show_fixed.enable(False) def _showFixedCallback(self, sender=None): self.show_fixed = sender.get() self.w.histogram.update() def _glyphSelectionCallback(self, sender=None): if self.w.glyph_selection.get() == 2: 
self.w.charset_selection.enable(True) else: self.w.charset_selection.enable(False) self.glyphs = self.get_glyphnames_for_histogram() self.calculate_histogram() def _charsetCallback(self, sender=None): self.glyphs = self.get_glyphnames_for_histogram() self.calculate_histogram() def _updateButtonCallback(self, sender=None): print "__updateButtonCallback" try: charset_index = self.w.charset_selection.get() self.update_charset_selection() if charset_index <= len(self.w.charset_selection.getItems()): self.w.charset_selection.set(charset_index) else: self.w.charset_selection.set(-1) self.glyphs = self.get_glyphnames_for_histogram() self.calculate_histogram() self.w.histogram.update() except: print "__updateButtonCallback Error" print traceback.format_exc() def update_charset_selection(self): if env == "robofont": self.charsets = getDefault("charsets") else: self.charsets = {} self.w.charset_selection.setItems(sorted(self.charsets.keys())) #if len(self.charsets) == 0: # self.w.charset_selection.enable(False) #else: # self.w.charset_selection.enable(True) def get_glyphnames_for_histogram(self): font = CurrentFont() mode = self.w.glyph_selection.get() if mode == 0: #print "Analyze Selection" names = font.selection elif mode == 1: #print "Analyze All Glyphs" names = font.glyphOrder print "__Names:", names else: #print "Analyze Charset" all_glyphs = font.glyphOrder selected_charset_name = self.w.charset_selection.getItems()[self.w.charset_selection.get()] names = [name for name in self.charsets[selected_charset_name] if name in all_glyphs] return names def calculate_histogram(self, sender=None): print "calculate_histogram" try: font = CurrentFont() #names = self.get_glyphnames_for_histogram() histogram = {} max_width = 0 for name in self.glyphs: width = font[name].width if width > max_width: max_width = width if width in histogram: histogram[width].append(name) else: histogram[width] = [name] self.max_width = max_width self.histogram = histogram except Exception, err: print "calculate_histogram Error" print traceback.format_exc() print "calculate_histogram" #print self.histogram self.w.histogram.update() def draw(self): # canvas draw callback drawing.save() self._drawGrid() drawing.restore() font = CurrentFont() if self.show_fixed and self.system is not None and self.system.fixed_units: # display the fixed widths of the current unitization system self.draw_histogram(font, self.system.upm, (0, 0, 1, 0.5), True, histogram=self.system.fixed_units) # draw the histogram for the current font print "__font:", font print "__font.info:", font.info self.draw_histogram(font, font.info.unitsPerEm, (1, 0, 0, 1), True) def draw_histogram(self, font, upm, color, show_glyphs=False, histogram=None): if histogram is None: if self.histogram is None: return histogram = self.histogram drawing.save() drawing.fill(1, 0.5, 0.2, 1.0) drawing.stroke(color[0], color[1], color[2], color[3]) for width in sorted(histogram.keys()): num = len(histogram[width]) x = 10 + width * self.histogram_width / (upm * self.ems_horizontal) drawing.save() if show_glyphs: drawing.save() drawing.fill(color[0], color[1], color[2], 0.2) drawing.fontsize(self.scale_vertical) for i in range(len(histogram[width])): glyph_name = histogram[width][i] if glyph_name in font: u = font[glyph_name].unicode if u: drawing.text("%s" % unichr(u), x + 4, 18 + i * self.scale_vertical) else: drawing.text("%s" % glyph_name, x + 4, 18 + i * self.scale_vertical) else: drawing.text("%s" % glyph_name, x + 4, 18 + i * self.scale_vertical) drawing.restore() 
drawing.strokewidth(2) else: drawing.strokewidth(6) # draw bars drawing.line(x, 20, x, 20 + num * self.scale_vertical) drawing.strokewidth(0) drawing.text("%s" % (num), x - 3 * len(str(num)), 22 + num * self.scale_vertical) drawing.restore() drawing.restore() def _drawGrid(self): label_every = 1 if self.units > 24: label_every = 2 drawing.save() drawing.strokeWidth(0) drawing.stroke(None) drawing.fill(0.88, 0.92, 0.98) if self.system is not None: if self.system.min_units is not None: drawing.rect(0, 0, 10 + self.system.min_units * self.histogram_width / (self.units * self.ems_horizontal), self.histogram_height) if self.system.max_units is not None: drawing.rect(10 + self.system.max_units * self.histogram_width / (self.units * self.ems_horizontal), 0, self.histogram_width, self.histogram_height) drawing.strokeWidth(1.0) drawing.stroke(0.8, 0.8, 0.8) drawing.fill(0.6, 0.6, 0.6) for u in range(0, int(ceil(self.units * self.ems_horizontal))): x = 10 + u * self.histogram_width / (self.units * self.ems_horizontal) if u == self.units: # mark the full em drawing.stroke(0, 0, 0) drawing.line(x, 20, x, self.histogram_height-10) drawing.strokeWidth(0) drawing.text("1 em", x + 4, self.histogram_height - 21) drawing.strokeWidth(1.0) elif u % 10 == 0: # make every 10th line darker drawing.stroke(0.5, 0.5, 0.5) drawing.line(x, 20, x, self.histogram_height - 20) else: drawing.stroke(0.8, 0.8, 0.8) drawing.line(x, 20, x, self.histogram_height - 30) if u % label_every == 0: drawing.strokeWidth(0) drawing.text("%s" % (u), x - 3 * len(str(u)), 5) drawing.restore() if __name__ == "__main__": # if RoboFont #OpenWindow(HistogramUI) print "__Main Start" try: HistogramUI() except: print "except" print sys.exc_info()[0] print "__Main End" ``` #### File: RoboFont/scripts/NotificationCenterTest.py ```python from os.path import exists def notify(title, subtitle, message): from os import system t = '-title {!r}'.format(title) s = '-subtitle {!r}'.format(subtitle) m = '-message {!r}'.format(message) a = '-sender {!r}'.format("com.typemytype.robofont") system('terminal-notifier {}'.format(' '.join([m, t, s, a]))) if exists("/usr/bin/terminal-notifier"): use_notifications = True else: use_notifications = False print "In order to use notifications, install the command line program with:" print "$ sudo gem install terminal-notifier" if use_notifications: notify("Hello from RoboFont", "Hello", "World") ```
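The histogram above is built by `calculate_histogram()`, which simply buckets glyph names by advance width. A minimal, editor-independent sketch of that bucketing step (the glyph names and widths below are made-up sample data, not values from the script):

```python
# Made-up sample data standing in for the font[name].width lookups in the script.
sample_widths = {"a": 520, "b": 540, "c": 520, "d": 540, "period": 260}

histogram = {}
max_width = 0
for name, width in sample_widths.items():
    max_width = max(max_width, width)
    # Group glyph names by advance width, as calculate_histogram() does.
    histogram.setdefault(width, []).append(name)

for width in sorted(histogram):
    print(width, len(histogram[width]), sorted(histogram[width]))
```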
{ "source": "jenskutilek/TypoLabs2016", "score": 4 }
#### File: jenskutilek/TypoLabs2016/07 Draw Circle In Glyph Loop.py ```python from robofab.world import CurrentGlyph from math import cos, degrees, pi, radians, sin def draw_circle(pen, (x, y), diameter=50, clockwise=False, roundness=0.552, phi=0): radius = 0.5 * diameter ctrl = 0.5 * diameter * roundness factor = -1 if clockwise else 1 phi = radians(phi) pen.moveTo( ( x + radius * sin(phi), y - radius * cos(phi) ) ) for i in range(4): rho = - factor * phi - i * 0.5 * pi ctrl1, ctrl2, pt = ( ( round(x - factor * radius * sin(rho) + factor * ctrl * cos(rho)), round(y - radius * cos(rho) - ctrl * sin(rho)) ), ( round(x + factor * radius * cos(rho) - factor * ctrl * sin(rho)), round(y - radius * sin(rho) - ctrl * cos(rho)) ), ( round(x + factor * radius * cos(rho)), round(y - radius * sin(rho)) ) ) pen.curveTo(ctrl1, ctrl2, pt) pen.closePath() g = CurrentGlyph() g.clear() p = g.getPen() r = 400 rd = 0.95 for i in range(14): clockwise = i % 2 rd -= 0.06 phi = 2 * i * pi draw_circle(p, (275, 216), r, clockwise, rd, phi) r -= 20 ``` #### File: jenskutilek/TypoLabs2016/08 Draw Polygon In Glyph.py ```python from robofab.world import CurrentGlyph from math import cos, degrees, pi, radians, sin def draw_polygon(pen, (x, y), diameter=50, n=8, phi=0, clockwise=False): """ pen: a RoboFab pen. (x, y): center coordinates diameter: diameter of the polygon n: number of corners phi: rotation in degrees clockwise: direction of the path """ radius = 0.5 * diameter phi = radians(phi) # Start point pen.moveTo( ( x - radius * sin(phi), y - radius * cos(phi) ) ) # Draw the segments if clockwise: for i in range(1, n): rho = 2 * pi * i / n pen.lineTo( ( x - radius * sin(phi + rho), y - radius * cos(phi + rho) ) ) else: for i in range(n - 1, 0, -1): rho = 2 * pi * i / n pen.lineTo( ( x - radius * sin(phi + rho), y - radius * cos(phi + rho) ) ) # Close the path pen.closePath() g = CurrentGlyph() g.clear() p = g.getPen() diameter = 600 n = 8 for i in range(10): clockwise = i % 2 phi = 1.2 * i * pi draw_polygon(p, (275, 216), diameter, n, phi, clockwise) #n -= 1 diameter -= 50 ``` #### File: jenskutilek/TypoLabs2016/19 Extremum Point Check.py ```python from robofab.world import CurrentFont, CurrentGlyph from fontTools.pens.basePen import BasePen # helper functions from fontTools.misc.arrayTools def normRect(rect): """Normalize the rectangle so that the following holds: xMin <= xMax and yMin <= yMax """ (xMin, yMin, xMax, yMax) = rect return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) def pointInRect(p, rect): """Return True when point (x, y) is inside rect.""" (x, y) = p xMin, yMin, xMax, yMax = rect return (xMin <= x <= xMax) and (yMin <= y <= yMax) class MyPen(BasePen): def _moveTo(self, pt): pass def _lineTo(self, pt): pass def _curveToOne(self, bcp1, bcp2, pt): curr = self._getCurrentPoint() rect = normRect((curr[0], curr[1], pt[0], pt[1])) if not pointInRect(bcp1, rect): print "Control point is out of bounding box:", bcp1 print " ", rect if not pointInRect(bcp2, rect): print "Control point is out of bounding box:", bcp2 print " ", rect def _closePath(self): pass def _endPath(self): pass def addComponent(self, baseGlyphName, transformation): pass p = MyPen(CurrentFont()) CurrentGlyph().draw(p) ``` #### File: jenskutilek/TypoLabs2016/20 HPGL Converter.py ```python from robofab.world import CurrentGlyph from fontTools.pens.basePen import BasePen class HPGLPen(BasePen): def __init__(self, glyphSet, scale=1): BasePen.__init__(self, glyphSet) self._scale = scale self._init_seq = 
self._get_init_sequence() self._hpgl = "" self._end_seq = self._get_end_sequence() self._pen_down = False self._prev_segment = None def _get_init_sequence(self): seq = "IN;" # IN = Initialize # IP = Scaling Point x1, y1, x2, y2 seq += "IP%s,%s,%s,%s;" % (0, 0, 16158, 11040) # reported by plotter # SC = Scale seq += "SC%s,%s,%s,%s;" % (0, 0, 1190, 842) # A3 in points (1/72 inch) # the rest of the init sequence seq += "PU;" # PU = Pen Up seq += "SP1;" # SP1 = Select Pen 1 seq += "LT;\n" # LT = Line Type solid return seq def _get_end_sequence(self): seq = "PU;" # PU = Pen Up seq += "SP;" # Put down the pen seq += "EC;" # EC = ? #seq += "PG1;" # PG = Page Feed #seq += "EC1;" # EC = ? seq = "PA0,0;" # Move head to 0, 0 seq += "OE;\n" # OE = ? return seq def _get_scaled_pt(self, (x, y)): if self._scale != 1: return (x * self._scale, y * self._scale) else: return (int(round(x)), int(round(y))) def _moveTo(self, pt): pt = self._get_scaled_pt(pt) self._hpgl += "PU%s,%s" % pt self._pen_down = False self.lastMove = pt self._prev_segment = "move" def _lineTo(self, pt): if self._prev_segment not in ["line", "curve"]: self._hpgl += ";PD" else: self._hpgl += "," pt = self._get_scaled_pt(pt) self._hpgl += "%s,%s" % pt self._prev_segment = "line" def _curveToOne(self, bcp1, bcp2, pt): bcp1 = self._get_scaled_pt(bcp1) bcp2 = self._get_scaled_pt(bcp2) pt = self._get_scaled_pt(pt) if self._prev_segment not in ["line", "curve"]: self._hpgl += ";PD" else: self._hpgl += "," self._hpgl += "%s,%s,%s,%s,%s,%s" % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1]) self._prev_segment = "curve" def _closePath(self): if self._prev_segment not in ["line", "curve"]: self._hpgl += ";PD" self._hpgl += "%s,%s\n" % pt else: self._hpgl += "," pt = self._get_scaled_pt(self.lastMove) self._hpgl += "%s,%s;\n" % pt self._prev_segment = "close" def _endPath(self): self._hpgl += u'PU;\n' self._prev_segment = "end" @property def hpgl(self): return self._init_seq + self._hpgl + self._end_seq def glyph_to_hpgl(glyph): # Drawing limits: (left 0; bottom 0; right 16158; top 11040) anchors = [a for a in glyph.anchors] glyph.clear(contours=False, components=False, anchors=True, guides=False) pen = HPGLPen(glyph._parent) glyph.draw(pen) for a in anchors: glyph.appendAnchor(a.name, a.position) return pen.hpgl g = CurrentGlyph() print glyph_to_hpgl(g) ``` #### File: TypoLabs2016/penCollection/svgPen.py ```python from fontTools.pens.basePen import BasePen # See also: # http://www.w3.org/TR/SVG/paths.html#PathDataBNF # https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths # SVG path parsing code from: # http://codereview.stackexchange.com/questions/28502/svg-path-parsing def parse_svg_path(path_data): digit_exp = '0123456789eE' comma_wsp = ', \t\n\r\f\v' drawto_command = 'MmZzLlHhVvCcSsQqTtAa' sign = '+-' exponent = 'eE' float = False entity = '' for char in path_data: if char in digit_exp: entity += char elif char in comma_wsp and entity: yield entity float = False entity = '' elif char in drawto_command: if entity: yield entity float = False entity = '' yield char elif char == '.': if float: yield entity entity = '.' else: entity += '.' float = True elif char in sign: if entity and entity[-1] not in exponent: yield entity float = False entity = char else: entity += char if entity: yield entity def drawSVGPath(pen, path=""): """ Draw an SVG path that is supplied as a string. This is limited to SVG paths that contain only elements that can be matched to the usual path elements found in a glyph. 
""" path_data = list(parse_svg_path(path)) #print path_data i = 0 prev_x = 0 prev_y = 0 while i < len(path_data): #print i, path_data[i] v = path_data[i] if v in "Cc": # Cubic curve segment x1, y1, x2, y2, x3, y3 = path_data[i+1:i+7] #print " ", x1, y1, x2, y2, x3, y3 x1 = float(x1) y1 = float(y1) x2 = float(x2) y2 = float(y2) x3 = float(x3) y3 = float(y3) if v == "c": x1 += prev_x y1 += prev_y x2 += prev_x y2 += prev_y x3 += prev_x y3 += prev_y pen.curveTo( (x1, y1), (x2, y2), (x3, y3), ) prev_x = x3 prev_y = y3 i += 7 elif v in "Hh": # Horizontal line segment x = path_data[i+1] #print " ", x x = float(x) if v == "h": x += prev_x pen.lineTo((x, prev_y)) prev_x = x i += 2 elif v in "LlMm": # Move or Line segment x, y = path_data[i+1:i+3] #print " ", x, y x = float(x) y = float(y) if v in "lm": x += prev_x y += prev_y if v in "Ll": pen.lineTo((x, y)) else: pen.moveTo((x, y)) prev_x = x prev_y = y i += 3 elif v in "Qq": # Quadratic curve segment x1, y1, x2, y2 = path_data[i+1:i+5] #print " ", x1, y1, x2, y2 x1 = float(x1) y1 = float(y1) x2 = float(x2) y2 = float(y2) if v == "q": x1 += prev_x y1 += prev_y x2 += prev_x y2 += prev_y pen.qCurveTo( (x1, y1), (x2, y2), ) prev_x = x2 prev_y = y2 i += 5 elif v in "Vv": # Vertical line segment y = path_data[i+1] #print y y = float(y) if v == "v": y += prev_y pen.lineTo((prev_x, y)) prev_y = y i += 2 elif v in "Zz": pen.closePath() i += 1 else: print "SVG path element '%s' is not supported for glyph paths." % path_data[i] break class SVGpen(BasePen): def __init__(self, glyphSet, round_coordinates=False, relative_coordinates=False, optimize_output=False): """ A pen that converts a glyph outline to an SVG path. After drawing, SVGPen.d contains the path as string. This corresponds to the SVG path element attribute "d". glyphSet (RFont or GSFont) The font object round_coordinates (Boolean) Round all coordinates to integer. Default is False. relative_coordinates (Boolean) Store all coordinates as relative. Default is False, i.e. choose whichever notation (absolute or relative) produces shorter output for each individual segment. optimize_output (Boolean) Make the output path string as short as possible. Default is True. Setting this to False also overrides the relative_coordinates option. 
""" self._rnd = round_coordinates self._rel = relative_coordinates self._opt = optimize_output self.prev_x = 0 self.prev_y = 0 BasePen.__init__(self, glyphSet) self.d = '' def reset(self): self.d = '' def _append_shorter(self, absolute, relative): if not self._rel and len(absolute) <= len(relative) or not self._opt: self.d += absolute else: self.d += relative def _get_shorter_sign(self, value): if value < 0 and self._opt: return '%s' % value else: return ' %s' % value def _moveTo(self, (x,y)): if self._rnd: a = 'M%s' % int(round(x)) a += self._get_shorter_sign(int(round(y))) else: a = 'M%s' % x a += self._get_shorter_sign(y) self.d += a self.prev_x = x self.prev_y = y def _lineTo(self, (x,y)): if y == self.prev_y: if self._rnd: a = 'H%d' % (int(round(x))) r = 'h%d' % (int(round(x - self.prev_x))) else: a = 'H%s' % (x) r = 'h%s' % (x - self.prev_x) elif x == self.prev_x: if self._rnd: a = 'V%d' % (int(round(y))) r = 'v%d' % (int(round(y - self.prev_y))) else: a = 'V%s' % (y) r = 'v%s' % (y - self.prev_y) else: if self._rnd: a = 'L%d' % int(round(x)) a += self._get_shorter_sign(int(round(y))) r = 'l%d' % int(round(x - self.prev_x)) r += self._get_shorter_sign(int(round(y - self.prev_y))) else: a = 'L%s' % x a += self._get_shorter_sign(y) r = 'l%s' % (x - self.prev_x) r += self._get_shorter_sign(y - self.prev_y) self._append_shorter(a, r) self.prev_x = x self.prev_y = y def _curveToOne(self, (x1,y1), (x2,y2), (x3,y3)): if self._rnd: a = 'C%s' % int(round(x1)) for coord in [ int(round(y1)), int(round(x2)), int(round(y2)), int(round(x3)), int(round(y3)) ]: a += self._get_shorter_sign(coord) r = 'c%s' % int(round(x1 - self.prev_x)) for coord in [ int(round(y1 - self.prev_y)), int(round(x2 - self.prev_x)), int(round(y2 - self.prev_y)), int(round(x3 - self.prev_x)), int(round(y3 - self.prev_y)) ]: r += self._get_shorter_sign(coord) else: a = 'C%s' % x1 for coord in [y1, x2, y2, x3, y3]: a += self._get_shorter_sign(coord) r = 'c%s' % (x1 - self.prev_x) for coord in [ y1 - self.prev_y, x2 - self.prev_x, y2 - self.prev_y, x3 - self.prev_x, y3 - self.prev_y ]: r += self._get_shorter_sign(coord) self._append_shorter(a, r) self.prev_x = x3 self.prev_y = y3 def _closePath(self): self.d += u'z' ```
{ "source": "jenskutilek/ufo2ft", "score": 3 }
#### File: ufo2ft/filters/scaleUPM.py ```python from ufo2ft.filters import BaseFilter class ScaleUPMFilter(BaseFilter): """ This filter scales the font to a new upm value. Set the target upm in an UFO like this: <key>com.github.googlei18n.ufo2ft.filters</key> <array> <dict> <key>name</key> <string>scaleUPM</string> <key>kwargs</key> <dict> <key>unitsPerEm</key> <string>2048</string> </dict> </dict> </array> """ _kwargs = { "unitsPerEm": 1000, } def _scaleGlyph(self, glyph): """ Scale a glyph """ for contour in glyph: for point in contour.points: point.x *= self.factor point.y *= self.factor for anchor in glyph.anchors: anchor.x *= self.factor anchor.y *= self.factor glyph.width *= self.factor def _scaleList(self, obj, name): """ Scale a font info property that is a list, i.e. scale each value. """ lst = getattr(obj, name) if lst is None: return lst = [self.factor * v for v in lst] setattr(obj, name, lst) def _scaleProperty(self, obj, name): prop = getattr(obj, name) if prop is None: return setattr(obj, name, self.factor * prop) def __call__(self, font, glyphSet=None): newUnitsPerEm = int(self.options.unitsPerEm) if font.info.unitsPerEm == newUnitsPerEm: return False self.factor = newUnitsPerEm / font.info.unitsPerEm # Scale glyphs super(ScaleUPMFilter, self).__call__(font, glyphSet) # Scale kerning for pair, value in font.kerning.items(): font.kerning[pair] = value * self.factor # TODO: Change positioning feature code # Scale info values for prop in ( "descender", "xHeight", "capHeight", "ascender", "openTypeHheaAscender", "openTypeHheaDescender", "openTypeHheaLineGap", "openTypeHheaCaretOffset", "openTypeOS2TypoAscender", "openTypeOS2TypoDescender", "openTypeOS2TypoLineGap", "openTypeOS2WinAscent", "openTypeOS2WinDescent", "openTypeOS2SubscriptXSize", "openTypeOS2SubscriptYSize", "openTypeOS2SubscriptXOffset", "openTypeOS2SubscriptYOffset", "openTypeOS2SuperscriptXSize", "openTypeOS2SuperscriptYSize", "openTypeOS2SuperscriptXOffset", "openTypeOS2SuperscriptYOffset", "openTypeOS2StrikeoutSize", "openTypeOS2StrikeoutPosition", "openTypeVheaVertTypoAscender", "openTypeVheaVertTypoDescender", "openTypeVheaVertTypoLineGap", "openTypeVheaCaretOffset", "postscriptUnderlineThickness", "postscriptUnderlinePosition", ): self._scaleProperty(font.info, prop) for prop in ( "postscriptBlueValues", "postscriptOtherBlues", "postscriptFamilyOtherBlues", "postscriptStemSnapH", "postscriptStemSnapV", "postscriptBlueFuzz", "postscriptBlueShift", "postscriptBlueScale", "postscriptDefaultWidthX", "postscriptNominalWidthX", ): self._scaleList(font.info, prop) # Finally set new UPM font.info.unitsPerEm = newUnitsPerEm return True def filter(self, glyph): if getattr(self.context, "skipCurrentFont", False): return False # Scale glyph self._scaleGlyph(glyph) # scale component offsets for i in range(len(glyph.components)): comp = glyph.components[i] xS, xyS, yxS, yS, xOff, yOff = comp.transformation comp.transformation = ( xS, xyS, yxS, yS, xOff * self.factor, yOff * self.factor, ) ``` #### File: Lib/ufo2ft/instructionCompiler.py ```python import array import logging from fontTools import ttLib from fontTools.pens.hashPointPen import HashPointPen from fontTools.ttLib.tables._g_l_y_f import ( OVERLAP_COMPOUND, ROUND_XY_TO_GRID, USE_MY_METRICS, ) logger = logging.getLogger(__name__) TRUETYPE_INSTRUCTIONS_KEY = "public.truetype.instructions" TRUETYPE_ROUND_KEY = "public.truetype.roundOffsetToGrid" TRUETYPE_METRICS_KEY = "public.truetype.useMyMetrics" TRUETYPE_OVERLAP_KEY = "public.truetype.overlap" OBJECT_LIBS_KEY = 
"public.objectLibs" class InstructionCompiler(object): def __init__(self, ufo, ttf): self.ufo = ufo self.font = ttf def _compile_program(self, key, table_tag): assert table_tag in ("prep", "fpgm") ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None) if ttdata: formatVersion = ttdata.get("formatVersion", None) if int(formatVersion) != 1: logger.error( f"Unknown formatVersion {formatVersion} " f"in key '{key}', " f"table '{table_tag}' will be empty in font." ) return asm = ttdata.get(key, None) if asm is not None: self.font[table_tag] = table = ttLib.newTable(table_tag) table.program = ttLib.tables.ttProgram.Program() table.program.fromAssembly(asm) # Roundtrip once, or if the font is dumped to XML before having # been saved, the assembly code if will look awful. table.program.fromBytecode(table.program.getBytecode()) def compile_cvt(self): cvts = [] ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None) if ttdata: formatVersion = ttdata.get("formatVersion", None) if int(formatVersion) != 1: logger.error( f"Unknown formatVersion {formatVersion} " f"in key 'controlValue', " f"table 'cvt' will be empty in font." ) return cvt_list = ttdata.get("controlValue", None) if cvt_list is not None: # Convert string keys to int cvt_dict = {int(v["id"]): v["value"] for v in cvt_list} # Find the maximum cvt index. # We can't just use the dict keys because the cvt must be # filled consecutively. max_cvt = max(cvt_dict.keys()) # Make value list, filling entries for missing keys with 0 cvts = [cvt_dict.get(i, 0) for i in range(max_cvt + 1)] if cvts: # Only write cvt to font if it contains any values self.font["cvt "] = cvt = ttLib.newTable("cvt ") cvt.values = array.array("h", cvts) def compile_fpgm(self): self._compile_program("fontProgram", "fpgm") def compile_glyf(self): for name in sorted(self.ufo.keys()): glyph = self.ufo[name] ttdata = glyph.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None) if name not in self.font["glyf"]: if ttdata is not None: logger.warning( f"Glyph '{name}' not found in font, " "skipping compilation of TrueType instructions " "for this glyph." ) logger.debug(f"UFO keys: {list(self.ufo.keys())}") logger.debug(f"glyf keys: {list(self.font['glyf'].keys())}") continue glyf = self.font["glyf"][name] if ttdata is not None: formatVersion = ttdata.get("formatVersion", None) if int(formatVersion) != 1: logger.error( f"Unknown formatVersion {formatVersion} " "in glyph '{name}', it will have " "no instructions in font." ) continue # Check if glyph hash matches the current outlines hash_pen = HashPointPen(glyph.width, self.ufo) glyph.drawPoints(hash_pen) glyph_id = ttdata.get("id", None) if glyph_id is None or glyph_id != hash_pen.hash: logger.error( f"Glyph hash mismatch, glyph '{name}' will have " "no instructions in font." ) continue # Compile the glyph program asm = ttdata.get("assembly", None) if asm is not None: glyf.program = ttLib.tables.ttProgram.Program() glyf.program.fromAssembly(asm) # Roundtrip once, or if the font is dumped to XML before # having been saved, the assembly code if will look awful. glyf.program.fromBytecode(glyf.program.getBytecode()) # Handle composites if glyf.isComposite(): # Remove empty glyph programs from composite glyphs if hasattr(glyf, "program") and not glyf.program: delattr(glyf, "program") # Set component flags # We need to decide when to set the flags. # Let's assume if any lib key is not there, or the component # doesn't have an identifier, we should leave the flags alone. 
use_my_metrics_comp = None for i, c in enumerate(glyf.components): if i >= len(glyph.components): logger.error( "Number of components differ between UFO and TTF " f"in glyph '{name}' ({len(glyph.components)} vs. " f"{len(glyf.components)}, not setting flags in " "additional components." ) break ufo_component_id = glyph.components[i].identifier if ( ufo_component_id is not None and OBJECT_LIBS_KEY in glyph.lib and ufo_component_id in glyph.lib[OBJECT_LIBS_KEY] and ( TRUETYPE_ROUND_KEY in glyph.lib[OBJECT_LIBS_KEY][ufo_component_id] or TRUETYPE_METRICS_KEY in glyph.lib[OBJECT_LIBS_KEY][ufo_component_id] ) ): component_lib = glyph.lib[OBJECT_LIBS_KEY][ufo_component_id] c.flags &= ~ROUND_XY_TO_GRID if component_lib.get(TRUETYPE_ROUND_KEY, False): c.flags |= ROUND_XY_TO_GRID c.flags &= ~USE_MY_METRICS if component_lib.get(TRUETYPE_METRICS_KEY, False): if use_my_metrics_comp: logger.warning( "Ignoring USE_MY_METRICS flag on component " f"'{ufo_component_id}' because it has been set on " f"component '{use_my_metrics_comp}' already." ) else: c.flags |= USE_MY_METRICS use_my_metrics_comp = ufo_component_id # We might automatically set the flags if no data is present, # but: # - https://github.com/googlefonts/ufo2ft/pull/425 recommends # against setting the ROUND_XY_TO_GRID flag # - USE_MY_METRICS has been set already by # outlineCompiler.OutlineTTFCompiler.autoUseMyMetrics if i == 0 and TRUETYPE_OVERLAP_KEY in glyph.lib: # Set OVERLAP_COMPOUND on the first component only c.flags &= ~OVERLAP_COMPOUND if glyph.lib.get(TRUETYPE_OVERLAP_KEY, False): c.flags |= OVERLAP_COMPOUND def compile_maxp(self): maxp = self.font["maxp"] ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None) if ttdata: for name in ( "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", # "maxSizeOfInstructions", # Is recalculated below "maxZones", "maxTwilightPoints", ): value = ttdata.get(name, None) if value is not None: setattr(maxp, name, value) # Recalculate maxp.maxSizeOfInstructions sizes = [ len(glyph.program.getBytecode()) for glyph in self.font["glyf"].glyphs.values() if hasattr(glyph, "program") ] + [0] maxp.maxSizeOfInstructions = max(sizes) def compile_prep(self): self._compile_program("controlValueProgram", "prep") def compile(self): self.compile_cvt() self.compile_fpgm() self.compile_prep() self.compile_glyf() # maxp depends on the other programs, to it needs to be last self.compile_maxp() ```
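`compile_cvt()` above receives control values as a list of `{"id", "value"}` entries and has to pad any missing indices with 0 so the cvt table stays consecutive. A standalone sketch of that gap-filling step, with invented sample entries:

```python
# Invented control-value entries; ids arrive as strings, indices 1 and 2 are missing.
cvt_list = [{"id": "0", "value": 12}, {"id": "3", "value": -8}]

cvt_dict = {int(v["id"]): v["value"] for v in cvt_list}
max_cvt = max(cvt_dict.keys())
cvts = [cvt_dict.get(i, 0) for i in range(max_cvt + 1)]  # pad gaps with 0
print(cvts)  # -> [12, 0, 0, -8]
```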
{ "source": "jenskutilek/VisualTrueType", "score": 2 }
#### File: VisualTrueType/tests/test_vttcompile.py ```python import vttcompilepy as vtt from fontTools.ttLib import TTFont from pathlib import Path import pytest TESTDATA = Path(__file__).parent / "data" IN_SELAWIK = TESTDATA / "Selawik-variable.ttf" OUT_COMPILED = TESTDATA / "out_c.ttf" OUT_COMPILED_STRIPPED = TESTDATA / "out_c_s.ttf" compiler = vtt.Compiler(IN_SELAWIK) compiler.compile_all() compiler.save_font(OUT_COMPILED, vtt.StripLevel.STRIP_NOTHING) compiler.save_font(OUT_COMPILED_STRIPPED, vtt.StripLevel.STRIP_SOURCE) @pytest.fixture def original_font(): return TTFont(IN_SELAWIK) @pytest.fixture def compiled_font(): return TTFont(OUT_COMPILED) @pytest.fixture def compiled_stripped_font(): return TTFont(OUT_COMPILED_STRIPPED) def test_compiled(original_font, tmp_path: Path, compiled_font): ttorig = original_font ttcomp = compiled_font assert ttorig['maxp'].numGlyphs == ttcomp['maxp'].numGlyphs assert ttorig['maxp'] == ttcomp['maxp'] assert ttorig['fpgm'] == ttcomp['fpgm'] assert ttorig['prep'] == ttcomp['prep'] glyf_orig = ttorig['glyf'] glyf_comp = ttcomp['glyf'] for glyph1 in ttorig.glyphOrder: print(glyph1) assert glyf_orig[glyph1].isComposite() == glyf_comp[glyph1].isComposite() assert glyf_orig[glyph1].getCoordinates(glyf_orig) == glyf_comp[glyph1].getCoordinates(glyf_comp) assert glyf_orig[glyph1].getComponentNames(glyf_orig) == glyf_comp[glyph1].getComponentNames(glyf_comp) assert hasattr(glyf_orig[glyph1],'program') == hasattr(glyf_comp[glyph1],'program') haveInstructions = hasattr(glyf_orig[glyph1], "program") if haveInstructions: #orig_codes = glyf_orig[glyph1].program.getBytecode() orig_assembly = glyf_orig[glyph1].program.getAssembly() print(orig_assembly) #comp_codes = glyf_comp[glyph1].program.getBytecode() comp_assembly = glyf_comp[glyph1].program.getAssembly() print(comp_assembly) assert orig_assembly == comp_assembly def test_stripped(original_font, tmp_path: Path, compiled_stripped_font): ttorig = original_font ttstrip = compiled_stripped_font assert("TSI0" in ttorig) assert("TSI0" not in ttstrip) assert("TSI1" in ttorig) assert("TSI1" not in ttstrip) assert("TSI2" in ttorig) assert("TSI2" not in ttstrip) assert("TSI3" in ttorig) assert("TSI3" not in ttstrip) assert("TSI5" in ttorig) assert("TSI5" not in ttstrip) ```
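A hypothetical minimal driver mirroring the vttcompilepy API exercised by the test above (`Compiler`, `compile_all`, `save_font`, `StripLevel`); the font paths are placeholders, not files from the test data:

```python
from pathlib import Path

import vttcompilepy as vtt

source = Path("MyFont-hinted.ttf")  # placeholder input font
compiler = vtt.Compiler(source)
compiler.compile_all()  # compile the VTT sources into binary hinting tables
compiler.save_font(Path("MyFont-compiled.ttf"), vtt.StripLevel.STRIP_SOURCE)
```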
{ "source": "JensLanders/SHOP", "score": 2 }
#### File: shop/cascade/catalog.py ```python from __future__ import unicode_literals from django.template.loader import select_template from django.utils.translation import ugettext_lazy as _ from cms.plugin_pool import plugin_pool from shop import settings as shop_settings from .plugin_base import ShopPluginBase class ShopCatalogPlugin(ShopPluginBase): name = _("Catalog List View") require_parent = True parent_classes = ('BootstrapColumnPlugin', 'SimpleWrapperPlugin',) cache = False def get_render_template(self, context, instance, placeholder): return select_template([ '{}/catalog/product-list.html'.format(shop_settings.APP_LABEL), 'shop/catalog/product-list.html', ]) plugin_pool.register_plugin(ShopCatalogPlugin) class ShopAddToCartPlugin(ShopPluginBase): name = _("Add Product to Cart") require_parent = True parent_classes = ('BootstrapColumnPlugin',) cache = False def get_render_template(self, context, instance, placeholder): return select_template([ '{}/catalog/product-add2cart.html'.format(shop_settings.APP_LABEL), 'shop/catalog/product-add2cart.html', ]) plugin_pool.register_plugin(ShopAddToCartPlugin) ``` #### File: shop/cascade/plugin_base.py ```python from __future__ import unicode_literals from django.apps import apps from django.core.exceptions import ImproperlyConfigured from django.forms import ChoiceField, widgets from django.template import TemplateDoesNotExist from django.template.loader import select_template from django.utils.html import format_html from django.utils.module_loading import import_string from django.utils.translation import ugettext_lazy as _, pgettext_lazy from django.utils.safestring import mark_safe from cms.plugin_pool import plugin_pool from django.utils.encoding import python_2_unicode_compatible from cmsplugin_cascade.fields import PartialFormField from cmsplugin_cascade.plugin_base import CascadePluginBase from cmsplugin_cascade.link.forms import LinkForm from cmsplugin_cascade.link.plugin_base import LinkPluginBase, LinkElementMixin from cmsplugin_cascade.utils import resolve_dependencies from django_select2.forms import HeavySelect2Widget from shop import settings as shop_settings from shop.forms.base import DialogFormMixin from shop.models.cart import CartModel from shop.models.product import ProductModel from shop.rest.serializers import ProductSelectSerializer class ShopPluginBase(CascadePluginBase): module = "Shop" require_parent = False allow_children = False @python_2_unicode_compatible class ShopLinkElementMixin(LinkElementMixin): def __str__(self): return self.plugin_class.get_identifier(self) class ShopLinkPluginBase(ShopPluginBase): """ Base plugin for arbitrary buttons used during various checkout pages. 
""" allow_children = False fields = (('link_type', 'cms_page',), 'glossary',) glossary_field_map = {'link': ('link_type', 'cms_page',)} allow_children = False parent_classes = [] require_parent = False class Media: js = resolve_dependencies('shop/js/admin/shoplinkplugin.js') @classmethod def get_link(cls, obj): link = obj.glossary.get('link', {}) if link.get('type') == 'cmspage': if 'model' in link and 'pk' in link: if not hasattr(obj, '_link_model'): Model = apps.get_model(*link['model'].split('.')) try: obj._link_model = Model.objects.get(pk=link['pk']) except Model.DoesNotExist: obj._link_model = None if obj._link_model: return obj._link_model.get_absolute_url() else: # use the link type as special action keyword return link.get('type') def get_ring_bases(self): bases = super(ShopLinkPluginBase, self).get_ring_bases() bases.append('LinkPluginBase') return bases class ShopButtonPluginBase(ShopLinkPluginBase): """ Base plugin for arbitrary buttons used during various checkout pages. """ fields = ('link_content', ('link_type', 'cms_page',), 'glossary',) class Media: css = {'all': ('cascade/css/admin/bootstrap.min.css', 'cascade/css/admin/bootstrap-theme.min.css',)} js = resolve_dependencies('shop/js/admin/shoplinkplugin.js') @classmethod def get_identifier(cls, instance): return mark_safe(instance.glossary.get('link_content', '')) class HeavySelect2Widget(HeavySelect2Widget): def render(self, name, value, attrs=None, choices=None): try: result = ProductSelectSerializer(ProductModel.objects.get(pk=value)) choices = ((value, result.data['text']),) except ProductModel.DoesNotExist: choices = () html = super(HeavySelect2Widget, self).render(name, value, attrs=attrs, choices=choices) return html class ProductSelectField(ChoiceField): def __init__(self, *args, **kwargs): kwargs.setdefault('widget', HeavySelect2Widget(data_view='shop:select-product')) super(ProductSelectField, self).__init__(*args, **kwargs) def clean(self, value): "Since the ProductSelectField does not specify choices by itself, accept any returned value" try: return int(value) except ValueError: pass class CatalogLinkForm(LinkForm): """ Alternative implementation of `cmsplugin_cascade.TextLinkForm`, which allows to link onto the Product model, using its method ``get_absolute_url``. Note: In this form class the field ``product`` is missing. It is added later, when the shop's Product knows about its materialized model. """ LINK_TYPE_CHOICES = (('cmspage', _("CMS Page")), ('product', _("Product")), ('exturl', _("External URL")), ('email', _("Mail To")),) product = ProductSelectField(required=False, label='', help_text=_("An internal link onto a product from the shop")) def clean_product(self): if self.cleaned_data.get('link_type') == 'product': app_label = ProductModel._meta.app_label self.cleaned_data['link_data'] = { 'type': 'product', 'model': '{0}.{1}'.format(app_label, ProductModel.__name__), 'pk': self.cleaned_data['product'], } def set_initial_product(self, initial): try: # check if that product still exists, otherwise return nothing Model = apps.get_model(*initial['link']['model'].split('.')) initial['product'] = Model.objects.get(pk=initial['link']['pk']).pk except (KeyError, ValueError, Model.DoesNotExist): pass class CatalogLinkPluginBase(LinkPluginBase): """ Modified implementation of ``cmsplugin_cascade.link.LinkPluginBase`` which adds link type "Product", to set links onto arbitrary products of this shop. 
""" # glossary_fields = ( # PartialFormField('title', # widgets.TextInput(), # label=_("Title"), # help_text=_("Link's Title") # ), # ) + LinkPluginBase.glossary_fields glossary_field_map = {'link': ('link_type', 'cms_page', 'product', 'ext_url', 'mail_to',)} class Media: js = resolve_dependencies('shop/js/admin/shoplinkplugin.js') class DialogFormPluginBase(ShopPluginBase): """ Base class for all plugins adding a dialog form to a placeholder field. """ require_parent = True parent_classes = ('BootstrapColumnPlugin', 'ProcessStepPlugin', 'BootstrapPanelPlugin', 'SegmentPlugin', 'SimpleWrapperPlugin') CHOICES = (('form', _("Form dialog")), ('summary', _("Static summary")),) glossary_fields = ( PartialFormField('render_type', widgets.RadioSelect(choices=CHOICES), label=_("Render as"), initial='form', help_text=_("A dialog can also be rendered as a box containing a read-only summary."), ), ) @classmethod def register_plugin(cls, plugin): """ Register plugins derived from this class with this function instead of `plugin_pool.register_plugin`, so that dialog plugins without a corresponding form class are not registered. """ if not issubclass(plugin, cls): msg = "Can not register plugin class `{}`, since is does not inherit from `{}`." raise ImproperlyConfigured(msg.format(plugin.__name__, cls.__name__)) if plugin.get_form_class() is None: msg = "Can not register plugin class `{}`, since is does not define a `form_class`." raise ImproperlyConfigured(msg.format(plugin.__name__)) plugin_pool.register_plugin(plugin) @classmethod def get_form_class(cls): return getattr(cls, 'form_class', None) @classmethod def get_identifier(cls, instance): render_type = instance.glossary.get('render_type') render_type = dict(cls.CHOICES).get(render_type, '') return format_html(pgettext_lazy('get_identifier', "as {}"), render_type) def __init__(self, *args, **kwargs): super(DialogFormPluginBase, self).__init__(*args, **kwargs) self.FormClass = import_string(self.get_form_class()) def get_form_data(self, context, instance, placeholder): """ Returns data to initialize the corresponding dialog form. This method must return a dictionary containing * either `instance` - a Python object to initialize the form class for this plugin, * or `initial` - a dictionary containing initial form data, or if both are set, values from `initial` override those of `instance`. 
""" if issubclass(self.FormClass, DialogFormMixin): try: cart = CartModel.objects.get_from_request(context['request']) cart.update(context['request']) except CartModel.DoesNotExist: cart = None return {'cart': cart} return {} def get_render_template(self, context, instance, placeholder): render_type = instance.glossary.get('render_type') if render_type not in ('form', 'summary',): render_type = 'form' try: template_names = [ '{0}/checkout/{1}'.format(shop_settings.APP_LABEL, self.template_leaf_name).format(render_type), 'shop/checkout/{}'.format(self.template_leaf_name).format(render_type), ] return select_template(template_names) except (AttributeError, TemplateDoesNotExist): return self.render_template def render(self, context, instance, placeholder): """ Return the context to render a DialogFormPlugin """ request = context['request'] form_data = self.get_form_data(context, instance, placeholder) request._plugin_order = getattr(request, '_plugin_order', 0) + 1 if not isinstance(form_data.get('initial'), dict): form_data['initial'] = {} form_data['initial'].update(plugin_id=instance.id, plugin_order=request._plugin_order) bound_form = self.FormClass(**form_data) context[bound_form.form_name] = bound_form return super(DialogFormPluginBase, self).render(context, instance, placeholder) ```
{ "source": "jenslaufer/airbnb-data-collection", "score": 2 }
#### File: jenslaufer/airbnb-data-collection/airbnb.py ```python import logging import argparse import sys import time import webbrowser from lxml import html import psycopg2 import psycopg2.errorcodes from airbnb_config import ABConfig from airbnb_survey import ABSurveyByBoundingBox from airbnb_survey import ABSurveyByNeighborhood, ABSurveyByZipcode from airbnb_listing import ABListing import airbnb_ws # ============================================================================ # CONSTANTS # ============================================================================ # Script version # 3.4 June 2018: Minor tweaks, but now know that Airbnb searches do not return # listings for which there are no available dates. # 3.3 April 2018: Changed to use /api/ for -sb if key provided in config file # 3.2 April 2018: fix for modified Airbnb site. Avoided loops over room types # in -sb # 3.1 provides more efficient "-sb" searches, avoiding loops over guests and # prices. See example.config for details, and set a large max_zoom (eg 12). # 3.0 modified -sb searches to reflect new Airbnb web site design (Jan 2018) # 2.9 adds resume for bounding box searches. Requires new schema # 2.8 makes different searches subclasses of ABSurvey # 2.7 factors the Survey and Listing objects into their own modules # 2.6 adds a bounding box search # 2.5 is a bit of a rewrite: classes for ABListing and ABSurvey, and requests lib # 2.3 released Jan 12, 2015, to handle a web site update SCRIPT_VERSION_NUMBER = 3.4 # logging = logging.getLogger() def list_search_area_info(config, search_area): """ Print a list of the search areas in the database to stdout. """ try: conn = config.connect() cur = conn.cursor() cur.execute(""" select search_area_id from search_area where name=%s """, (search_area,)) result_set = cur.fetchall() cur.close() count = len(result_set) if count == 1: print("\nThere is one search area called", str(search_area), "in the database.") elif count > 1: print("\nThere are", str(count), "cities called", str(search_area), "in the database.") elif count < 1: print("\nThere are no cities called", str(search_area), "in the database.") sys.exit() sql_neighborhood = """select count(*) from neighborhood where search_area_id = %s""" sql_search_area = """select count(*) from search_area where search_area_id = %s""" for result in result_set: search_area_id = result[0] cur = conn.cursor() cur.execute(sql_neighborhood, (search_area_id,)) count = cur.fetchone()[0] cur.close() print("\t" + str(count) + " neighborhoods.") cur = conn.cursor() cur.execute(sql_search_area, (search_area_id,)) count = cur.fetchone()[0] cur.close() print("\t" + str(count) + " Airbnb cities.") except psycopg2.Error as pge: logging.error(pge.pgerror) logging.error("Error code %s", pge.pgcode) logging.error("Diagnostics %s", pge.diag.message_primary) cur.close() conn.rollback() raise except Exception: logging.error("Failed to list search area info") raise def list_surveys(config): """ Print a list of the surveys in the database to stdout. 
""" try: conn = config.connect() cur = conn.cursor() cur.execute(""" select survey_id, to_char(survey_date, 'YYYY-Mon-DD'), survey_description, search_area_id, status from survey where survey_date is not null and status is not null and survey_description is not null order by survey_id asc""") result_set = cur.fetchall() if result_set: template = "| {0:3} | {1:>12} | {2:>50} | {3:3} | {4:3} |" print (template.format("ID", "Date", "Description", "SA", "status")) for survey in result_set: (survey_id, survey_date, desc, sa_id, status) = survey print(template.format(survey_id, survey_date, desc, sa_id, status)) except Exception: logging.error("Cannot list surveys.") raise def db_ping(config): """ Test database connectivity, and print success or failure. """ try: conn = config.connect() if conn is not None: print("Connection test succeeded: {db_name}@{db_host}" .format(db_name=config.DB_NAME, db_host=config.DB_HOST)) else: print("Connection test failed") except Exception: logging.exception("Connection test failed") def db_add_survey(config, search_area): """ Add a survey entry to the database, so the survey can be run. Also returns the survey_id, in case it is to be used.. """ try: conn = config.connect() cur = conn.cursor() # Add an entry into the survey table, and get the survey_id sql = """ insert into survey (survey_description, search_area_id) select (name || ' (' || current_date || ')') as survey_description, search_area_id from search_area where name = %s returning survey_id""" cur.execute(sql, (search_area,)) survey_id = cur.fetchone()[0] # Get and print the survey entry cur.execute("""select survey_id, survey_date, survey_description, search_area_id from survey where survey_id = %s""", (survey_id,)) (survey_id, survey_date, survey_description, search_area_id) = cur.fetchone() conn.commit() cur.close() print("\nSurvey added:\n" + "\n\tsurvey_id=" + str(survey_id) + "\n\tsurvey_date=" + str(survey_date) + "\n\tsurvey_description=" + survey_description + "\n\tsearch_area_id=" + str(search_area_id)) return survey_id except Exception: logging.error("Failed to add survey for %s", search_area) raise def db_delete_survey(config, survey_id): """ Delete the listings and progress for a survey from the database. Set the survey to "incomplete" in the survey table. """ question = "Are you sure you want to delete listings for survey {}? [y/N] ".format(survey_id) sys.stdout.write(question) choice = input().lower() if choice != "y": print("Cancelling the request.") return try: conn = config.connect() cur = conn.cursor() # Delete the listings from the room table sql = """ delete from room where survey_id = %s """ cur.execute(sql, (survey_id,)) print("{} listings deleted from 'room' table".format(cur.rowcount)) # Delete the entry from the progress log table sql = """ delete from survey_progress_log_bb where survey_id = %s """ cur.execute(sql, (survey_id,)) # No need to report: it's just a log table # Update the survey entry sql = """ update survey set status = 0, survey_date = NULL where survey_id = %s """ cur.execute(sql, (survey_id,)) if cur.rowcount == 1: print("Survey entry updated") else: print("Warning: {} survey entries updated".format(cur.rowcount)) conn.commit() cur.close() except Exception: logging.error("Failed to delete survey for %s", survey_id) raise pass def db_get_room_to_fill(config, survey_id): """ For "fill" runs (loops over room pages), choose a random room that has not yet been visited in this "fill". 
""" for attempt in range(config.MAX_CONNECTION_ATTEMPTS): try: conn = config.connect() cur = conn.cursor() if survey_id == 0: # no survey specified sql = """ select room_id, survey_id from room where deleted is null order by random() limit 1 """ cur.execute(sql) else: sql = """ select room_id, survey_id from room where deleted is null and survey_id = %s order by random() limit 1 """ cur.execute(sql, (survey_id,)) (room_id, survey_id) = cur.fetchone() listing = ABListing(config, room_id, survey_id) cur.close() conn.commit() return listing except TypeError: logging.info("Finishing: no unfilled rooms in database --") conn.rollback() del config.connection return None except Exception: logging.exception("Error retrieving room to fill from db") conn.rollback() del config.connection return None def db_add_search_area(config, search_area, flag): """ Add a search_area to the database. """ try: logging.info("Adding search_area to database as new search area") # Add the search_area to the database anyway conn = config.connect() cur = conn.cursor() # check if it exists sql = """ select name from search_area where name = %s""" cur.execute(sql, (search_area,)) if cur.fetchone() is not None: print("City already exists: {}".format(search_area)) return True # Compute an abbreviation, which is optional and can be used # as a suffix for search_area views (based on a shapefile) # The abbreviation is lower case, has no whitespace, is 10 characters # or less, and does not end with a whitespace character # (translated as an underscore) abbreviation = search_area.lower()[:10].replace(" ", "_") while abbreviation[-1] == "_": abbreviation = abbreviation[:-1] # Insert the search_area into the table sql = """insert into search_area (name, abbreviation) values (%s, %s)""" cur.execute(sql, (search_area, abbreviation,)) sql = """select currval('search_area_search_area_id_seq') """ cur.execute(sql, ()) search_area_id = cur.fetchone()[0] # city_id = cur.lastrowid cur.close() conn.commit() print("Search area {} added: search_area_id = {}" .format(search_area, search_area_id)) print("Before searching, update the row to add a bounding box, using SQL.") print("I use coordinates from http://www.mapdevelopers.com/geocode_bounding_box.php.") print("The update statement to use is:") print("\n\tUPDATE search_area") print("\tSET bb_n_lat = ?, bb_s_lat = ?, bb_e_lng = ?, bb_w_lng = ?") print("\tWHERE search_area_id = {}".format(search_area_id)) print("\nThis program does not provide a way to do this update automatically.") except Exception: print("Error adding search area to database") raise def display_room(config, room_id): """ Open a web browser and show the listing page for a room. """ webbrowser.open(config.URL_ROOM_ROOT + str(room_id)) def display_host(config, host_id): """ Open a web browser and show the user page for a host. """ webbrowser.open(config.URL_HOST_ROOT + str(host_id)) def fill_loop_by_room(config, survey_id): """ Master routine for looping over rooms (after a search) to fill in the properties. 
""" room_count = 0 while room_count < config.FILL_MAX_ROOM_COUNT: try: if not config.HTTP_PROXY_LIST: logging.info( "No proxies left: re-initialize after %s seconds", config.RE_INIT_SLEEP_TIME) time.sleep(config.RE_INIT_SLEEP_TIME) # be nice config = ABConfig() room_count += 1 listing = db_get_room_to_fill(config, survey_id) if listing is None: return None else: if listing.ws_get_room_info(config.FLAGS_ADD): pass else: # Airbnb now seems to return nothing if a room has gone listing.save_as_deleted() except AttributeError: logging.error("Attribute error: marking room as deleted.") listing.save_as_deleted() except Exception as e: logging.error("Error in fill_loop_by_room: %s", str(type(e))) raise def parse_args(): """ Read and parse command-line arguments """ parser = argparse.ArgumentParser( description='Manage a database of Airbnb listings.', usage='%(prog)s [options]') parser.add_argument("-v", "--verbose", action="store_true", default=False, help="""write verbose (debug) output to the log file""") parser.add_argument("-c", "--config_file", metavar="config_file", action="store", default=None, help="""explicitly set configuration file, instead of using the default <username>.config""") # Only one argument! group = parser.add_mutually_exclusive_group() group.add_argument('-asa', '--addsearcharea', metavar='search_area', action='store', default=False, help="""add a search area to the database. A search area is typically a city, but may be a bigger region.""") group.add_argument('-asv', '--add_survey', metavar='search_area', type=str, help="""add a survey entry to the database, for search_area""") group.add_argument('-dbp', '--dbping', action='store_true', default=False, help='Test the database connection') group.add_argument('-dh', '--displayhost', metavar='host_id', type=int, help='display web page for host_id in browser') group.add_argument('-dr', '--displayroom', metavar='room_id', type=int, help='display web page for room_id in browser') group.add_argument('-dsv', '--delete_survey', metavar='survey_id', type=int, help="""delete a survey from the database, with its listings""") group.add_argument('-f', '--fill', nargs='?', metavar='survey_id', type=int, const=0, help='fill details for rooms collected with -s') group.add_argument('-lsa', '--listsearcharea', metavar='search_area', type=str, help="""list information about this search area from the database""") group.add_argument('-lr', '--listroom', metavar='room_id', type=int, help='list information about room_id from the database') group.add_argument('-ls', '--listsurveys', action='store_true', default=False, help='list the surveys in the database') group.add_argument('-psa', '--printsearcharea', metavar='search_area', action='store', default=False, help="""print the name and neighborhoods for search area (city) from the Airbnb web site""") group.add_argument('-pr', '--printroom', metavar='room_id', type=int, help="""print room_id information from the Airbnb web site""") group.add_argument('-ps', '--printsearch', metavar='survey_id', type=int, help="""print first page of search information for survey from the Airbnb web site""") group.add_argument('-psn', '--printsearch_by_neighborhood', metavar='survey_id', type=int, help="""print first page of search information for survey from the Airbnb web site, by neighborhood""") group.add_argument('-psz', '--printsearch_by_zipcode', metavar='survey_id', type=int, help="""print first page of search information for survey from the Airbnb web site, by zipcode""") group.add_argument('-psb', 
'--printsearch_by_bounding_box', metavar='survey_id', type=int, help="""print first page of search information for survey from the Airbnb web site, by bounding_box""") group.add_argument('-s', '--search', metavar='survey_id', type=int, help='search for rooms using survey survey_id') group.add_argument('-sn', '--search_by_neighborhood', metavar='survey_id', type=int, help='search for rooms using survey survey_id') group.add_argument('-sb', '--search_by_bounding_box', metavar='survey_id', type=int, help="""search for rooms using survey survey_id, by bounding box """) group.add_argument('-asb', '--add_and_search_by_bounding_box', metavar='search_area', type=str, help="""add a survey for search_area and search , by bounding box """) group.add_argument('-sz', '--search_by_zipcode', metavar='survey_id', type=int, help="""search for rooms using survey_id, by zipcode""") group.add_argument('-V', '--version', action='version', version='%(prog)s, version ' + str(SCRIPT_VERSION_NUMBER)) group.add_argument('-?', action='help') args = parser.parse_args() return (parser, args) def main(): """ Main entry point for the program. """ (parser, args) = parse_args() logging.basicConfig(format='%(levelname)-8s%(message)s') ab_config = ABConfig(args) try: if args.search: survey = ABSurveyByNeighborhood(ab_config, args.search) survey.search(ab_config.FLAGS_ADD) elif args.search_by_neighborhood: survey = ABSurveyByNeighborhood(ab_config, args.search_by_neighborhood) survey.search(ab_config.FLAGS_ADD) elif args.search_by_zipcode: survey = ABSurveyByZipcode(ab_config, args.search_by_zipcode) survey.search(ab_config.FLAGS_ADD) elif args.search_by_bounding_box: survey = ABSurveyByBoundingBox(ab_config, args.search_by_bounding_box) survey.search(ab_config.FLAGS_ADD) elif args.add_and_search_by_bounding_box: survey_id = db_add_survey(ab_config, args.add_and_search_by_bounding_box) survey = ABSurveyByBoundingBox(ab_config, survey_id) survey.search(ab_config.FLAGS_ADD) elif args.fill is not None: fill_loop_by_room(ab_config, args.fill) elif args.addsearcharea: db_add_search_area(ab_config, args.addsearcharea, ab_config.FLAGS_ADD) elif args.add_survey: db_add_survey(ab_config, args.add_survey) elif args.dbping: db_ping(ab_config) elif args.delete_survey: db_delete_survey(ab_config, args.delete_survey) elif args.displayhost: display_host(ab_config, args.displayhost) elif args.displayroom: display_room(ab_config, args.displayroom) elif args.listsearcharea: list_search_area_info(ab_config, args.listsearcharea) elif args.listroom: listing = ABListing(ab_config, args.listroom, None) listing.print_from_db() elif args.listsurveys: list_surveys(ab_config) elif args.printsearcharea: ws_get_city_info(ab_config, args.printsearcharea, ab_config.FLAGS_PRINT) elif args.printroom: listing = ABListing(ab_config, args.printroom, None) listing.get_room_info_from_web_site(ab_config.FLAGS_PRINT) elif args.printsearch: survey = ABSurveyByNeighborhood(ab_config, args.printsearch) survey.search(ab_config.FLAGS_PRINT) elif args.printsearch_by_neighborhood: survey = ABSurveyByNeighborhood(ab_config, args.printsearch_by_neighborhood) survey.search(ab_config.FLAGS_PRINT) elif args.printsearch_by_bounding_box: survey = ABSurveyByBoundingBox(ab_config, args.printsearch_by_bounding_box) survey.search(ab_config.FLAGS_PRINT) elif args.printsearch_by_zipcode: survey = ABSurveyByZipcode(ab_config, args.printsearch_by_zipcode) survey.search(ab_config.FLAGS_PRINT) else: parser.print_help() except (SystemExit, KeyboardInterrupt): sys.exit() except Exception: 
logging.exception("Top level exception handler: quitting.") sys.exit(0) if __name__ == "__main__": main() ```
{ "source": "jenslaufer/Price-Tracker", "score": 2 }
#### File: amazoncrawler/spiders/amazon_spider.py ```python import scrapy from ..items import AmazoncrawlerItem from tracker.models import Order from amazoncrawler.spiders import amazon_spider from amazoncrawler import pipelines from django.core.mail import send_mail from django.conf import settings class AmazonSpider(scrapy.Spider): name = "amazon" start_urls = [ '123'] print("1111") def __init__(self, *args,**kwargs): # super(AmazonSpider, self).__init__(*args, **kwargs) if kwargs.get('check'): self.check=1 # self.product_object=kwargs.get('order_object') # print(self.product_object) # print("11111111111111111111") # print(type(self.product_object)) # self.url = self.product_object.url self.url = kwargs.get('url') self.desired_amount = kwargs.get('price_d') self.p_name = kwargs.get('name_p') self.mail_user = kwargs.get('u_mail') self.start_urls=[self.url] print("11111111111111111111111111111111111111111111111111111111111111111111111") else: self.check=0 print("2222222222222222222222") self.url = kwargs.get('url') self.d_price = kwargs.get('d_price') self.start_urls=[self.url] self.author=kwargs.get('author') print(self.d_price) print(self.start_urls) print(self.author) # return scrapy.Request(self.start_urls[0],callback=self.parse) def parse(self, response): print("-11111-1-1--1-1--1-") if self.check==0: items = AmazoncrawlerItem() print("33333333333333333333") product_name = response.xpath("//div[@class='_1YokD2 _3Mn1Gg col-8-12']/div[@class='_1AtVbE col-12-12']/div[@class='aMaAEs']/div/h1/span/text()").extract()[0] # print(product_name) product_price = response.xpath("//div[@class='_1YokD2 _3Mn1Gg col-8-12']/div[@class='_1AtVbE col-12-12']/div[@class='aMaAEs']/div[@class='dyC4hf']/div[@class='CEmiEU']/div/div[@class='_30jeq3 _16Jk6d']/text()").extract()[0] s = response.xpath("//div[@class='_2c7YLP UtUXW0 _6t1WkM _3HqJxg']/div[@class='_1YokD2 _2GoDe3']/div[@class='_1YokD2 _3Mn1Gg col-5-12 _78xt5Y']/div/div/div/div/div/div[@class='_2mLllQ']/ul/li[@class='_20Gt85 _1Y_A6W']/div[@class='_1AuMiq P9aMAP']/div/@style").extract()[0] l = s.split('(') product_img = l[1][:-1] print(product_name) print(product_img) items['product_name'] = product_name items['price'] = product_price items['image'] = product_img items['author'] = self.author items['desired_price'] = self.d_price items['url'] = self.url #TO EXPLICITLY CALL PIPELINE obj = pipelines.AmazoncrawlerPipeline() obj.process_item(items,'amazon') # print("111111111111111111111111") print(items) yield items else: product_price = response.xpath("//div[@class='_1YokD2 _3Mn1Gg col-8-12']/div[@class='_1AtVbE col-12-12']/div[@class='aMaAEs']/div[@class='dyC4hf']/div[@class='CEmiEU']/div/div[@class='_30jeq3 _16Jk6d']/text()").extract()[0] print("Checking price") product_price = product_price.replace(",","") product_price = product_price[1:] # print(product_price,self.desired_amount) if int(product_price)<=int(self.desired_amount): subject='PRICE DROPPED!!!' print("Price Dropped") message= self.p_name + 'the product that you had set to track.Its Price dropped' email_from=settings.EMAIL_HOST_USER recipient_list = [self.mail_user] send_mail(subject, message, email_from, recipient_list,fail_silently=False) ```
{ "source": "jenslauterbach/devday_website", "score": 3 }
#### File: management/commands/remove_inactive_users.py ```python from datetime import timedelta from django.conf import settings from django.core.management import BaseCommand from django.utils import timezone from attendee.models import DevDayUser class Command(BaseCommand): help = ( "Remove all inactive users that are older than the configured ACTIVATION_DAYS" ) def add_arguments(self, parser): parser.add_argument( "--dry-run", action="store_true", dest="dry_run", default=False, help="Do not delete users", ) def handle(self, *args, **options): now = timezone.now() cutoff_date = now - timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) if options["verbosity"] > 1: self.stdout.write( "delete inactive users that are older than {}".format(cutoff_date) ) users = DevDayUser.objects.filter( is_active=False, date_joined__lte=cutoff_date ).order_by("email") if options["verbosity"] > 1: if options["dry_run"]: self.stdout.write("dry run, no actual deletions") self.stdout.write("will delete the following users:") for user in users: self.stdout.write( "{} (joined {})".format(user.get_username(), user.date_joined) ) if not options["dry_run"]: users.delete() ``` #### File: devday/utils/devday_toolbar.py ```python import locale from cms.toolbar import items from cms.toolbar_base import CMSToolbar from cms.cms_toolbars import ADMIN_MENU_IDENTIFIER, ADMINISTRATION_BREAK class DevDayToolbarBase(CMSToolbar): DEVDAY_BREAK = 'devday-break' def position_in_admin_section(self, name): parent = self.admin_menu first = parent.find_first( items.Break, identifier=ADMINISTRATION_BREAK) + 1 last = parent.find_first( items.Break, identifier=self.DEVDAY_BREAK) if not last: last_item = parent.add_break(self.DEVDAY_BREAK, position=first) last = items.ItemSearchResult( last_item, parent._item_position(last_item)) lname = locale.strxfrm(name.lower()) for item in parent.items[first.index:last.index]: if locale.strcoll( locale.strxfrm(str(item.name).lower()), lname) > 0: position = items.ItemSearchResult( item, parent._item_position(item)) break else: position = last return position.index def add_admin_link_item_alphabetically(self, name, url): position = self.position_in_admin_section(name) item = self.admin_menu.add_link_item(name, url=url, position=position) return item def add_admin_submenu_alphabetically(self, label, name): position = self.position_in_admin_section(name) menu = self.admin_menu.get_or_create_menu( label, name, position=position) return menu def add_link_item_alphabetically(self, menu, name, url): position = menu.get_alphabetical_insert_position( name, items.LinkItem) item = menu.add_link_item(name, url=url, position=position) return item def populate(self): self.admin_menu = self.toolbar.get_menu(ADMIN_MENU_IDENTIFIER) ``` #### File: devday/event/contextprocessors.py ```python from .models import Event def current_event_contextprocessor(request): current_event = Event.objects.current_event() if current_event is not None: return { "current_event": current_event, "talk_submission_open": current_event.submission_open, "attendee_registration_open": current_event.registration_open, "sessions_published": current_event.sessions_published, "event_feedback_open": current_event.feedback_open, } ``` #### File: event/migrations/0004_event_talkformat.py ```python from __future__ import unicode_literals from django.core.management.color import no_style from django.db import DEFAULT_DB_ALIAS, connections, migrations, models def create_dflt_format_fwd(apps, schema_manager): Event = apps.get_model("event", "Event") Talk = 
apps.get_model("talk", "Talk") TalkFormat = apps.get_model("talk", "TalkFormat") talkformat = TalkFormat.objects.create(id=1, name='Vortrag', duration=60) sequence_sql = connections[DEFAULT_DB_ALIAS].ops. \ sequence_reset_sql(no_style(), [TalkFormat]) if sequence_sql: with connections[DEFAULT_DB_ALIAS].cursor() as cursor: for command in sequence_sql: cursor.execute(command) talkformat.event_set.add(*Event.objects.all()) talkformat.talk_set.add(*Talk.objects.all()) def create_dflt_format_rev(apps, schema_manager): TalkFormat = apps.get_model("talk", "TalkFormat") TalkFormat.objects.filter(id=1).delete() class Migration(migrations.Migration): dependencies = [ ('talk', '0027_talkformats'), ('event', '0003_event_sessions_published'), ] operations = [ migrations.AddField( model_name='event', name='talkformat', field=models.ManyToManyField(to='talk.TalkFormat', verbose_name='Talk Formats'), ), migrations.RunPython(create_dflt_format_fwd, create_dflt_format_rev), ] ``` #### File: sponsoring/tests/test_appconfig.py ```python from django.apps import AppConfig, apps from django.test import SimpleTestCase from django.utils.translation import ugettext_lazy as _ class AppConfigTest(SimpleTestCase): def test_app_config(self): config = apps.get_app_config('sponsoring') self.assertIsInstance(config, AppConfig) self.assertEqual(config.verbose_name, _('Sponsor Management')) ``` #### File: sponsoring/tests/test_models.py ```python from django.test import TestCase from sponsoring.models import SponsoringPackage class SponsoringPackageTest(TestCase): def test_css_class(self): package = SponsoringPackage(package_type=1) self.assertEqual(package.css_class, 'gold') package = SponsoringPackage(package_type=2) self.assertEqual(package.css_class, 'silver') package = SponsoringPackage(package_type=3) self.assertEqual(package.css_class, 'bronze') ``` #### File: devday/sponsoring/views.py ```python from django.conf import settings from django.contrib.sites.models import Site from django.core.mail import EmailMessage from django.shortcuts import get_object_or_404 from django.template.loader import render_to_string from django.urls import reverse, reverse_lazy from django.views.generic import FormView, TemplateView, RedirectView from event.models import Event from sponsoring.forms import SponsoringContactForm from sponsoring.models import SponsoringPackage class SponsoringView(FormView): template_name = 'sponsoring/sponsoring.html' form_class = SponsoringContactForm success_url = reverse_lazy('sponsoring_thanks') email_subject_template = 'sponsoring/sponsoring_request_mail_subject.txt' email_body_template = 'sponsoring/sponsoring_request_mail_body.txt' event = None def dispatch(self, request, *args, **kwargs): self.event = get_object_or_404(Event, slug=self.kwargs['event']) return super().dispatch(request, *args, **kwargs) def form_valid(self, form): self.send_email(form) return super().form_valid(form) def get_form_kwargs(self): initial = super().get_form_kwargs() initial['event'] = self.event return initial def get_email_context(self, form): choices = dict(form.get_possible_choices()) chosen = [] for choice in form.cleaned_data['sponsoring_options']: chosen.append(choices[int(choice)]) return { 'event': self.event, 'organization': form.cleaned_data['organization'], 'contact_email': form.cleaned_data['email'], 'body_text': form.cleaned_data['body'], 'site': Site.objects.get_current(self.request), 'packages': chosen, 'request': self.request, } def send_email(self, form): context = self.get_email_context(form) email = 
EmailMessage( subject=render_to_string( self.email_subject_template, context, self.request), body=render_to_string( self.email_body_template, context, self.request), from_email=settings.DEFAULT_EMAIL_SENDER, to=settings.SPONSORING_RECIPIENTS, reply_to=[context['contact_email']], headers={'From': settings.SPONSORING_FROM_EMAIL}, ) email.send(fail_silently=False) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) packages = [] for package in SponsoringPackage.objects.filter(event=self.event): packages.append({ 'name': package.get_package_type_display(), 'css_class': package.css_class, 'pricing': package.pricing, 'package_items': [] }) for item in package.sponsoringpackageitem_set.all(): if item.is_header: packages[-1]['package_items'].append({ 'name': item.name, 'description': item.description, 'package_items': [], }) else: packages[-1]['package_items'][-1]['package_items'].append({ 'name': item.name, 'description': item.description, }) context['packages'] = packages return context class SponsoringThanksView(TemplateView): template_name = 'sponsoring/sponsoring_thanks.html' class RedirectToCurrentEventView(RedirectView): def get_redirect_url(self, *args, **kwargs): return reverse( 'sponsoring_view', kwargs={'event': Event.objects.current_event().slug}) ``` #### File: devday/talk/admin.py ```python from functools import update_wrapper from django.conf.urls import url from django.contrib import admin from django.http import HttpResponseRedirect from django.shortcuts import render from django.urls import reverse from django.utils.translation import gettext_lazy as _ from django.utils.translation import ngettext_lazy from formtools.wizard.views import SessionWizardView from attendee.models import Attendee from event.models import Event from speaker.models import PublishedSpeaker, Speaker from talk.forms import ( AddTalkSlotFormStep1, AddTalkSlotFormStep2, SessionReservationForm, TalkSlotForm, ) from talk.signals import send_reservation_confirmation_mail from .models import ( AttendeeFeedback, Room, SessionReservation, Talk, TalkDraftSpeaker, TalkFormat, TalkMedia, TalkPublishedSpeaker, TalkSlot, TimeSlot, Track, ) class PrefetchAdmin(object): # noinspection PyUnresolvedReferences def get_field_queryset(self, db, db_field, request): qs = super().get_field_queryset(db, db_field, request) if db_field.name in self.queryset_prefetch_fields: prefetch_fields = self.queryset_prefetch_fields[db_field.name] if qs is None: qs = prefetch_fields[0].objects.select_related(*prefetch_fields[1]) else: qs = qs.select_related(*prefetch_fields[1]) return qs class TalkMediaInline(admin.StackedInline): model = TalkMedia class TalkSlotInline(PrefetchAdmin, admin.StackedInline): model = TalkSlot fields = (("room", "time"),) queryset_prefetch_fields = { "room": (Room, ("event",)), "time": (TimeSlot, ("event",)), } def get_queryset(self, request): return ( super().get_queryset(request).select_related("room", "time", "room__event") ) @admin.register(Room) class RoomAdmin(admin.ModelAdmin): list_display = ["name", "event"] list_filter = ["event"] ordering = ["-event__title", "name"] class TalkDraftSpeakerInline(PrefetchAdmin, admin.StackedInline): model = TalkDraftSpeaker extra = 1 fields = ("draft_speaker",) queryset_prefetch_fields = {"draft_speaker": (Speaker, ("user",))} class TalkPublishedSpeakerInline(PrefetchAdmin, admin.StackedInline): model = TalkPublishedSpeaker extra = 1 fields = ("published_speaker",) queryset_prefetch_fields = {"published_speaker": (PublishedSpeaker, 
("event",))} def formfield_for_foreignkey(self, db_field, request, **kwargs): field = super().formfield_for_foreignkey(db_field, request, **kwargs) field.queryset = field.queryset.filter(event__exact=request._obj_.event) return field @admin.register(Talk) class TalkAdmin(PrefetchAdmin, admin.ModelAdmin): list_display = ("title", "draft_speakers_joined", "event", "track") search_fields = ("title", "draft_speakers__name", "event__title", "track__name") list_filter = ["event", "track"] inlines = [ TalkDraftSpeakerInline, TalkPublishedSpeakerInline, TalkMediaInline, TalkSlotInline, ] ordering = ["title"] list_select_related = ["event", "track", "track__event"] filter_horizontal = ("talkformat",) prepopulated_fields = {"slug": ("title",)} readonly_fields = ("event",) actions = ["publish_talks", "process_waiting_list"] queryset_prefetch_fields = { "draft_speakers": (Speaker, ("user",)), "published_speakers": (PublishedSpeaker, ("speaker", "speaker__user", "event")), "track": (Track, ("event",)), } def draft_speakers_joined(self, obj): return ", ".join([speaker.name for speaker in obj.draft_speakers.all()]) draft_speakers_joined.name = _("Draft speakers") def get_queryset(self, request): return ( super() .get_queryset(request) .select_related("event", "track", "track__event") .prefetch_related("draft_speakers", "published_speakers") ) def publish_talks(self, request, queryset): if "apply" in request.POST: published = 0 for talk in queryset.all(): track_field_name = "selected_track-{}".format(talk.id) if request.POST[track_field_name]: track = Track.objects.get(id=int(request.POST[track_field_name])) talk.publish(track) published += 1 self.message_user( request, ngettext_lazy( "One Session has been published.", "%(count)d sessions have been published.", published, ) % {"count": published}, ) return HttpResponseRedirect(request.get_full_path()) return render( request, "talk/admin/publish_talks.html", context={"talks": queryset, "tracks": Track.objects.order_by("name")}, ) publish_talks.short_description = _("Publish selected sessions") def process_waiting_list(self, request, queryset): mailcount = 0 attendees = set() for talk in queryset.filter( spots__gt=0, event_id=Event.objects.current_event_id() ): confirmed_reservations = SessionReservation.objects.filter( talk=talk, is_confirmed=True ).count() if talk.spots > confirmed_reservations: waiting_reservations = ( SessionReservation.objects.filter(talk=talk, is_waiting=True) .select_related("attendee", "attendee__user") .order_by("created") ) for reservation in waiting_reservations[ : talk.spots - confirmed_reservations ]: user = reservation.attendee.user reservation.is_waiting = False reservation.save() send_reservation_confirmation_mail(request, reservation, user) attendees.add(user.email) mailcount += 1 if mailcount > 0: self.message_user( request, ngettext_lazy( "A confirmation mail has been sent to %(attendees)s.", "%(count)d confirmation mails have been sent to %(attendees)s.", mailcount, ) % {"count": mailcount, "attendees": ", ".join(attendees)}, ) return HttpResponseRedirect(request.get_full_path()) process_waiting_list.short_description = _( "Process waiting list for selected sessions" ) def get_form(self, request, obj=None, change=False, **kwargs): request._obj_ = obj return super().get_form(request, obj, change, **kwargs) class AddTalkSlotView(SessionWizardView): template_name = "talk/admin/talkslot_add_form.html" form_list = [AddTalkSlotFormStep1, AddTalkSlotFormStep2] def get_context_data(self, form, **kwargs): context = 
super().get_context_data(form, **kwargs) context["opts"] = TalkSlot._meta return context def get_form_initial(self, step): data = super().get_form_initial(step) if step == "0": data["event"] = Event.objects.current_event() return data def get_form_kwargs(self, step=None): kwargs = super().get_form_kwargs(step) if step == "1": kwargs["event"] = self.get_cleaned_data_for_step("0")["event"] return kwargs def done(self, form_list, **kwargs): data = self.get_all_cleaned_data() TalkSlot.objects.create(talk=data["talk"], room=data["room"], time=data["time"]) kwargs["admin"].message_user(self.request, _("Talk slot created successfully")) return HttpResponseRedirect(reverse("admin:talk_talkslot_changelist")) create_talk_slot = AddTalkSlotView.as_view() @admin.register(TalkSlot) class TalkSlotAdmin(admin.ModelAdmin): list_display = ["time", "event", "room", "talk"] list_filter = ["time__event"] list_select_related = ("time", "talk", "room", "time__event") form = TalkSlotForm # NOTYET autocomplete_fields = list_display, needs Django 2.x def event(self, obj): return obj.time.event event.short_desription = _("Event") def get_urls(self): def wrap(view): def wrapper(*args, **kwargs): kwargs["admin"] = self return self.admin_site.admin_view(view)(*args, **kwargs) return update_wrapper(wrapper, view) return [ url(r"^add/$", wrap(create_talk_slot), name="talkslot_add") ] + super().get_urls() @admin.register(TalkFormat) class TalkFormatAdmin(admin.ModelAdmin): pass @admin.register(TimeSlot) class TimeSlotAdmin(admin.ModelAdmin): list_display = ["name", "event", "start_time", "end_time", "text_body"] list_filter = ["event"] ordering = ["-event__title", "start_time"] @admin.register(Track) class TrackAdmin(admin.ModelAdmin): list_display = ["name", "event"] list_filter = ["event"] ordering = ["-event__title", "name"] @admin.register(SessionReservation) class SessionReservationAdmin(admin.ModelAdmin): list_display = ("email", "talk_title", "is_confirmed", "is_waiting") list_select_related = ("attendee", "talk", "attendee__user") list_filter = ("attendee__event", "is_confirmed", "is_waiting") ordering = ("talk__title", "attendee__user__email") form = SessionReservationForm def email(self, obj): return obj.attendee.user.email def talk_title(self, obj): return obj.talk.title @admin.register(AttendeeFeedback) class AttendeeFeedbackAdmin(admin.ModelAdmin): list_display = ("attendee_name", "talk_speakers", "talk_title", "score") list_select_related = ("attendee__user", "talk", "talk__event") readonly_fields = ("attendee", "talk") ordering = ("talk__title", "attendee__user__email") list_filter = ("talk__event",) def attendee_name(self, obj): return obj.attendee.user.email def talk_speakers(self, obj): return ", ".join( [ str(s.published_speaker) for s in TalkPublishedSpeaker.objects.filter(talk=obj.talk) ] ) def talk_title(self, obj): return obj.talk.title queryset_prefetch_fields = { "attendee": (Attendee, ("user", "event")), "talk": (Talk, ("title", "event")), "talk__published_speakers": (PublishedSpeaker, ("name", "event")), } def get_queryset(self, request): return ( super() .get_queryset(request) .select_related("talk", "attendee", "attendee__user", "attendee__event") ) ``` #### File: devday/talk/context_processors.py ```python from event.models import Event from talk.models import Talk def committee_member_context_processor(request): if request.user.is_authenticated: return { "is_committee_member": request.user.has_perms( ("talk.add_vote", "talk.add_talkcomment") ) } else: return {"is_committee_member": False} 
def reservation_context_processor(request): event = Event.objects.current_event() if event.sessions_published and not event.is_started(): return { "reservable_sessions": Talk.objects.filter( event=event, track__isnull=False, spots__gt=0 ).exists() } return {"reservable_sessions": False} ``` #### File: talk/migrations/0030_fill_session_slugs_from_titles.py ```python from __future__ import unicode_literals from django.db import migrations from django.utils.text import slugify def create_default_slug(apps, schema_manager): Talk = apps.get_model('talk', 'Talk') for talk in Talk.objects.all(): talk.slug = slugify(talk.title) talk.save() class Migration(migrations.Migration): dependencies = [ ('talk', '0029_auto_20181008_1422'), ] operations = [ migrations.RunPython(create_default_slug) ] ``` #### File: devday/twitterfeed/contextprocessors.py ```python from django.conf import settings from twitterfeed.models import Tweet def twitter_feed_context_processor(request): result = {} if request.path in settings.TWITTERFEED_PATHS: result['twitter_feed'] = Tweet.objects.filter(show_on_site__exact=True).select_related( 'user_profile_image').order_by('-created_at')[:5] return result ``` #### File: twitterfeed/tests/test_admin.py ```python from django.test import TestCase from django.utils import timezone from twitterfeed.admin import show_on_site, hide_on_site from twitterfeed.models import Tweet class TestTweetAdminActions(TestCase): def setUp(self): self.tweet1 = Tweet.objects.create( twitter_id='0815', created_at=timezone.now(), show_on_site=False) self.tweet2 = Tweet.objects.create( twitter_id='4711', created_at=timezone.now(), show_on_site=True) def test_show_on_site(self): show_on_site(None, None, Tweet.objects.all()) self.tweet1.refresh_from_db() self.assertTrue(self.tweet1.show_on_site) def test_hide_on_site(self): hide_on_site(None, None, Tweet.objects.all()) self.tweet2.refresh_from_db() self.assertFalse(self.tweet2.show_on_site) ```
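The devday snippets above include a `remove_inactive_users` management command with a `--dry-run` switch. As a hedged sketch only (it assumes a configured Django settings module for this project and is not part of the original repository), the command can be exercised from a script or shell like this:
```python
# Hypothetical invocation sketch; "devday.settings" is an assumed settings module name.
import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devday.settings")
django.setup()

# List the inactive users that would be removed, without deleting anything.
call_command("remove_inactive_users", dry_run=True, verbosity=2)
```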
{ "source": "jenslawerp/bpl_erp", "score": 2 }
#### File: doctype/call_off_order/call_off_order.py
```python
from __future__ import unicode_literals

import frappe
from frappe.model.document import Document
from frappe.utils import money_in_words


class CallOffOrder(Document):
    def get_call_off_purchase_order(self):
        po = frappe.get_doc("Purchase Order", self.purchase_order)
        return po.as_dict()

    def get_po_amount_in_words(self):
        if self.purchase_order:
            po = frappe.get_doc("Purchase Order", self.purchase_order)
            return money_in_words(po.total)

    def get_service_agreement(self):
        if self.service_agreement:
            sa = frappe.get_doc('Service Agreement', self.service_agreement)
            return sa.as_dict()

    def get_po_items(self, update=False):
        if self.purchase_order:
            po = frappe.get_doc("Purchase Order", self.purchase_order)
            if not update:
                return po.items
            self.set("po_items", [])
            for item in po.items:
                d = frappe.new_doc("COO Purchase Order Items")
                d.parent = self.name
                d.parenttype = "Call Off Order"
                d.parentfield = "po_items"
                d.item_code = item.item_code
                d.qty = item.qty
                d.uom = item.uom
                d.unit_price = item.rate
                d.total_price = item.amount
                d.part_number = item.supplier_part_no
                d.description = item.description
                self.po_items.append(d)

    def validate(self):
        self.collect_po_details()
        self.get_po_items(update=True)
        self.set_signature_placeholder()

    def collect_po_details(self):
        if self.purchase_order:
            po = frappe.get_doc("Purchase Order", self.purchase_order)
            self.purchase_order_price = po.total
            self.purchase_order_date = po.transaction_date
            self.po_price_words = self.get_po_amount_in_words()

    def set_signature_placeholder(self):
        for i in self.signatories:
            i.signature = "."
```
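The `get_po_amount_in_words` helper above relies on Frappe's `money_in_words` utility. A minimal illustration, assuming a working Frappe installation initialised for a site (the exact wording and currency of the output depend on the site defaults):
```python
# Assumes frappe is installed and a site is initialised; the amount is an arbitrary example.
from frappe.utils import money_in_words

print(money_in_words(12500.50))
# Prints the amount spelled out in words for the site's default currency.
```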
{ "source": "jensln/AoC-2020", "score": 3 }
#### File: jensln/AoC-2020/day8.py
```python
from helper import lines_from_day
from functools import cache


def parse():
    for line in lines_from_day(8):
        instruction, operand = line.split(" ")
        yield (instruction, int(operand))


class Machine:
    def __init__(self, program):
        self.prog = program  # list of (instruction, operand)
        self.accu = 0
        self.prcn = 0
        self.visited = set()

    def acc(self, n):
        self.accu += n

    def jmp(self, n):
        self.prcn += n-1

    def nop(self, _):
        pass

    def run(self, start=0):
        self.prcn = start
        while (self.prcn not in self.visited) \
                and (0 <= self.prcn < len(self.prog)):
            self.visited.add(self.prcn)
            inst, op = self.prog[self.prcn]
            {
                "acc": self.acc,
                "jmp": self.jmp,
                "nop": self.nop,
            }[inst](op)
            self.prcn += 1
        return (self.prcn not in self.visited)

    def postmortem(self):
        if (self.prcn not in self.visited):  # if finished.
            print(f"Reached end of program with acc={self.accu}.")
        else:
            inst, op = self.prog[self.prcn]
            print(f"Cancelled with acc={self.accu} after"
                  + f" entering an infinite loop from {inst} {op}"
                  + f" at {self.prcn}.")


program = list(parse())

print("Part 1:", end=' ')
m = Machine(program)
m.run()
m.postmortem()


class Repairer:
    def __init__(self, program):
        self.prog = program

    def search(self):  # brute force ...
        for (i, (instr, op)) in enumerate(self.prog):
            pm = self.prog.copy()
            if instr == "acc":
                continue
            else:
                ni = ("jmp" if (instr == "nop") else "nop")
                pm[i] = [ni, op]
            if (m := Machine(pm)).run():
                m.postmortem()
                break


print("Part 2:")
rp = Repairer(program)
rp.search()
```
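A quick way to see the interpreter above in action is to feed it the small boot-code program from the puzzle description; this assumes the `Machine` class from `day8.py` is in scope:
```python
# Example boot-code program; running it should report an infinite loop with acc=5.
example = [
    ("nop", 0), ("acc", 1), ("jmp", 4), ("acc", 3), ("jmp", -3),
    ("acc", -99), ("acc", 1), ("jmp", -4), ("acc", 6),
]
m = Machine(example)
finished = m.run()   # False: the program never reaches the end
m.postmortem()       # "Cancelled with acc=5 after entering an infinite loop ..."
```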
{ "source": "jensm04/pool-temperature", "score": 3 }
#### File: jensm04/pool-temperature/measure_temperature.py ```python import os import glob import time import sys import datetime from influxdb import InfluxDBClient os.system('modprobe w1-gpio') os.system('modprobe w1-therm') host = "" port = 8086 user = "" password = "" dbname = "" base_dir = '/sys/bus/w1/devices/' device_folder = glob.glob(base_dir + '28*')[0] device_file = device_folder + '/w1_slave' client = InfluxDBClient(host, port, user, password, dbname) measurement = "pool_temperature" # the name of the measurement you'd like to use location = "poolside" def read_temp_raw(): f = open(device_file, 'r') lines = f.readlines() f.close() return lines def read_temp(): lines = read_temp_raw() iso = time.ctime() while lines[0].strip()[-3:] != 'YES': time.sleep(0.2) lines = read_temp_raw() equals_pos = lines[1].find('t=') if equals_pos != -1: temp_string = lines[1][equals_pos+2:] temp_c = float(temp_string) / 1000.0 data = [ { "measurement": measurement, "tags": { "location": location, }, "fields": { "temperature" : temp_c } } ] client.write_points(data) return temp_c read_temp() ```
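`read_temp()` above works by checking the CRC line for `YES`, scanning the second line of the `w1_slave` file for a `t=` marker, and dividing by 1000. The same parsing logic can be checked without any hardware using a canned payload:
```python
# Stand-alone illustration of the DS18B20 parsing used in read_temp();
# the two strings mimic a typical /sys/bus/w1/devices/28-*/w1_slave readout.
sample_lines = [
    "6e 01 4b 46 7f ff 02 10 5c : crc=5c YES\n",
    "6e 01 4b 46 7f ff 02 10 5c t=22875\n",
]

if sample_lines[0].strip()[-3:] == "YES":
    equals_pos = sample_lines[1].find("t=")
    if equals_pos != -1:
        temp_c = float(sample_lines[1][equals_pos + 2:]) / 1000.0
        print(temp_c)  # 22.875
```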
{ "source": "jensm04/speedtest-influxdb", "score": 2 }
#### File: jensm04/speedtest-influxdb/speedtest.py ```python import time import json import subprocess from subprocess import PIPE from influxdb import InfluxDBClient # InfluxDB Settings DB_ADDRESS = '' DB_PORT = 8086 DB_USER = '' DB_PASSWORD = '' DB_DATABASE = '' influxdb_client = InfluxDBClient( DB_ADDRESS, DB_PORT, DB_USER, DB_PASSWORD, None) def init_db(): databases = influxdb_client.get_list_database() if len(list(filter(lambda x: x['name'] == DB_DATABASE, databases))) == 0: influxdb_client.create_database( DB_DATABASE) # Create if does not exist. else: influxdb_client.switch_database(DB_DATABASE) # Switch to if does exist. def format_for_influx(cliout): data = json.loads(cliout) # There is additional data in the speedtest-cli output but it is likely not necessary to store. influx_data = [ { 'measurement': 'ping', 'time': data['timestamp'], 'fields': { 'jitter': data['ping']['jitter'], 'latency': data['ping']['latency'] } }, { 'measurement': 'download', 'time': data['timestamp'], 'fields': { # Byte to Megabit 'bandwidth': data['download']['bandwidth'] / 125000, 'bytes': data['download']['bytes'], 'elapsed': data['download']['elapsed'] } }, { 'measurement': 'upload', 'time': data['timestamp'], 'fields': { # Byte to Megabit 'bandwidth': data['upload']['bandwidth'] / 125000, 'bytes': data['upload']['bytes'], 'elapsed': data['upload']['elapsed'] } }, { 'measurement': 'packetLoss', 'time': data['timestamp'], 'fields': { 'packetLoss': float(data.get('packetLoss', 0.0)) } }, { 'measurement': 'server', 'time': data['timestamp'], 'fields': { 'name': data['server']['name'], 'location': data['server']['location'], 'host': data['server']['host'] } }, { 'measurement': 'identification', 'time': data['timestamp'], 'fields': { 'id': data['result']['id'] } } ] return influx_data def main(): init_db() # Setup the database if it does not already exist. speedtest = subprocess.run( ["speedtest", "--accept-license", "--accept-gdpr", "-f", "json"], stdout=PIPE, stderr=PIPE) if speedtest.returncode == 0: # Speedtest was successful. data = format_for_influx(speedtest.stdout) print("Speedtest Successful:") if influxdb_client.write_points(data) == True: print("Data written to DB successfully") else: # Speedtest failed. print("Speedtest Failed:") print(speedtest.stderr) print(speedtest.stdout) if __name__ == '__main__': print('Speedtest CLI Data Logger to InfluxDB') main() ```
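`format_for_influx()` above converts Ookla's bandwidth figures, reported in bytes per second, to megabits per second by dividing by 125000. A tiny self-contained check of that conversion on a trimmed-down sample payload:
```python
import json

# Minimal stand-in for the speedtest CLI's JSON output (only the fields used here).
sample = json.loads('{"download": {"bandwidth": 11562500}, "upload": {"bandwidth": 2406250}}')

print(sample["download"]["bandwidth"] / 125000)  # 92.5 Mbit/s
print(sample["upload"]["bandwidth"] / 125000)    # 19.25 Mbit/s
```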
{ "source": "JensMadsen/gcloud-aio", "score": 2 }
#### File: gcloud-aio/bigquery/nox.py ```python import os import nox LOCAL_DEPS = ('../auth/', ) @nox.session @nox.parametrize('python_version', ['3.6', '3.7']) def unit_tests(session, python_version): session.interpreter = f'python{python_version}' session.virtualenv_dirname = f'unit-{python_version}' session.install('pytest', 'pytest-cov', *LOCAL_DEPS) session.install('-e', '.') session.run( 'py.test', '--quiet', '--cov=gcloud.aio.bigquery', '--cov=tests.unit', '--cov-append', '--cov-report=', os.path.join('tests', 'unit'), *session.posargs) @nox.session @nox.parametrize('python_version', ['3.6', '3.7']) def integration_tests(session, python_version): if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): session.skip('Credentials must be set via environment variable.') session.interpreter = f'python{python_version}' session.virtualenv_dirname = f'integration-{python_version}' session.install('aiohttp', 'pytest', 'pytest-asyncio', *LOCAL_DEPS) session.install('.') session.run('py.test', '--quiet', 'tests/integration') @nox.session @nox.parametrize('python_version', ['3.7']) def lint_setup_py(session, python_version): session.interpreter = f'python{python_version}' session.virtualenv_dirname = 'setup' session.install('docutils', 'Pygments') session.run( 'python', 'setup.py', 'check', '--restructuredtext', '--strict') @nox.session @nox.parametrize('python_version', ['3.7']) def cover(session, python_version): session.interpreter = f'python{python_version}' session.virtualenv_dirname = 'cover' session.install('codecov', 'coverage', 'pytest-cov') session.run('coverage', 'report', '--show-missing') session.run('codecov') session.run('coverage', 'erase') ``` #### File: aio/kms/kms.py ```python import aiohttp from gcloud.aio.auth import Token API_ROOT = 'https://cloudkms.googleapis.com/v1' LOCATION = 'global' SCOPES = [ 'https://www.googleapis.com/auth/cloudkms', ] class KMS: def __init__(self, project, service_file, keyproject, keyring, keyname, location=LOCATION, session=None, token=None): # pylint: disable=too-many-arguments self.api_root = (f'{API_ROOT}/projects/{keyproject}/' f'locations/{location}/keyRings/{keyring}/' f'cryptoKeys/{keyname}') self.session = session self.token = token or Token(project, service_file, scopes=SCOPES, session=self.session) async def headers(self): token = await self.token.get() return { 'Authorization': f'Bearer {token}', 'Content-Type': 'application/json', } # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/decrypt async def decrypt(self, ciphertext, session=None): url = f'{self.api_root}:decrypt' body = { 'ciphertext': ciphertext, } if not self.session: self.session = aiohttp.ClientSession(conn_timeout=10, read_timeout=10) s = session or self.session resp = await s.post(url, headers=await self.headers(), json=body) resp.raise_for_status() return (await resp.json())['plaintext'] # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt async def encrypt(self, plaintext, session=None): url = f'{self.api_root}:encrypt' body = { 'plaintext': plaintext, } if not self.session: self.session = aiohttp.ClientSession(conn_timeout=10, read_timeout=10) s = session or self.session resp = await s.post(url, headers=await self.headers(), json=body) resp.raise_for_status() return (await resp.json())['ciphertext'] ``` #### File: aio/kms/utils.py ```python import base64 def decode(payload): """ https://en.wikipedia.org/wiki/Base64#URL_applications modified Base64 for URL variants exist, 
where the + and / characters of standard Base64 are respectively replaced by - and _ """ variant = payload.replace('-', '+').replace('_', '/') return base64.b64decode(variant).decode() def encode(payload): """ https://en.wikipedia.org/wiki/Base64#URL_applications modified Base64 for URL variants exist, where the + and / characters of standard Base64 are respectively replaced by - and _ """ if not isinstance(payload, bytes): payload = payload.encode('utf-8') encoded = base64.b64encode(payload) return encoded.replace(b'+', b'-').replace(b'/', b'_').decode('utf-8') ``` #### File: aio/storage/blob.py ```python class Blob: def __init__(self, bucket, name, data): self.__dict__.update(**data) self.bucket = bucket self.name = name self.size = int(self.size) @property def chunk_size(self): return self.size + (262144 - (self.size % 262144)) async def download_as_string(self, session=None): return await self.bucket.storage.download_as_string(self.bucket.name, self.name, session=session) async def upload_from_string(self, data, session=None): content = await self.bucket.storage.upload(self.bucket.name, self.name, data, session=session) self.__dict__.update(content) return content ```
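The `encode()`/`decode()` helpers in the kms `utils.py` above implement the URL-safe Base64 variant by swapping `+`/`/` for `-`/`_`. A stdlib-only round trip showing the same substitution (it should agree with `base64.urlsafe_b64encode`):
```python
import base64

payload = b"gcloud-aio kms demo"

# Manual substitution, as in encode() above.
encoded = base64.b64encode(payload).replace(b"+", b"-").replace(b"/", b"_").decode("utf-8")
print(encoded == base64.urlsafe_b64encode(payload).decode("utf-8"))  # True

# Reverse substitution, as in decode() above.
decoded = base64.b64decode(encoded.replace("-", "+").replace("_", "/"))
print(decoded)  # b'gcloud-aio kms demo'
```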
{ "source": "jensmcatanho/data-driven_astronomy", "score": 3 }
#### File: 1b/2_mean_of_a_set_of_signals/program.py
```python
import numpy as np


def mean_datasets(files):
    datasets = []
    for file in files:
        datasets.append(np.loadtxt(file, delimiter=','))

    result = np.mean(datasets, axis=0)
    return np.round(result, decimals=1)


if __name__ == '__main__':
    # Test Case 1
    print(mean_datasets(['data1.csv', 'data2.csv', 'data3.csv']))

    # Test Case 2
    print(mean_datasets(['data4.csv', 'data5.csv', 'data6.csv']))
```
#### File: 2b/2_heavenly_angles/program.py
```python
import numpy as np


def angular_dist(r1, d1, r2, d2):
    r1_rad = np.radians(r1)
    d1_rad = np.radians(d1)
    r2_rad = np.radians(r2)
    d2_rad = np.radians(d2)

    a = np.sin(np.abs(d1_rad - d2_rad) * 0.5) ** 2
    b = np.cos(d1_rad) * np.cos(d2_rad) * (np.sin(np.abs(r1_rad - r2_rad) * 0.5) ** 2)
    d = 2 * np.arcsin(np.sqrt(a + b))

    return np.degrees(d)


if __name__ == '__main__':
    # Test Case 1
    print(angular_dist(21.07, 0.1, 21.15, 8.2))

    # Test Case 2
    print(angular_dist(10.3, -3, 24.3, -29))
```
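`angular_dist()` above is the haversine formula applied to equatorial coordinates. Two easy sanity checks, assuming the function from the snippet is in scope: with equal right ascensions the separation is just the declination difference, and two opposite points on the celestial equator are 180 degrees apart.
```python
# Assumes angular_dist() from 2b/2_heavenly_angles/program.py is defined.
print(angular_dist(30.0, 10.0, 30.0, 25.0))  # ~15.0 (same RA, declinations 15 deg apart)
print(angular_dist(0.0, 0.0, 180.0, 0.0))    # ~180.0 (opposite points on the equator)
```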
{ "source": "JensNevens/AtariDominator", "score": 3 }
#### File: JensNevens/AtariDominator/logs.py ```python from tensorflow.python.summary import event_accumulator import numpy as np import pandas as pd import sys def create_csv(inpath, outpath): sg = {event_accumulator.COMPRESSED_HISTOGRAMS: 1, event_accumulator.IMAGES: 1, event_accumulator.AUDIO: 1, event_accumulator.SCALARS: 0, event_accumulator.HISTOGRAMS: 1} ea = event_accumulator.EventAccumulator(inpath, size_guidance=sg) ea.Reload() scalar_tags = ea.Tags()['scalars'] df = pd.DataFrame(columns=scalar_tags) for tag in scalar_tags: events = ea.Scalars(tag) scalars = np.array(map(lambda x: x.value, events)) df.loc[:, tag] = scalars df.to_csv(outpath) if __name__ == '__main__': args = sys.argv inpath = args[1] outpath = args[2] create_csv(inpath, outpath) ``` #### File: AtariDominator/plots/plot.py ```python import pandas as pd import matplotlib.pyplot as plt import numpy as np import os import sys def plot(data_dir): # Get all files in data_dir paths = [os.path.join(data_dir, f) for f in os.listdir(data_dir)] paths = [path for path in paths if os.path.splitext(path)[-1].lower() == '.csv'] # Create a huge df with MultiIndex inner = list(range(len(paths))) df = pd.read_csv(paths[0], index_col=0) outer = np.array(df.columns) iterables = [outer, inner] cols = pd.MultiIndex.from_product(iterables, names=['metric', 'idx']) df = pd.DataFrame(columns=cols) # Dump all the data into it for i, path in enumerate(paths): df_in = pd.read_csv(path, index_col=0) for column in df_in: df.loc[:, (column,i)] = df_in[column] # Compute the means and stds for metric in df.columns.get_level_values(0).unique(): df.loc[:, (metric, 'mean')] = df[metric].mean(axis=1) df.loc[:, (metric, 'std')] = df[metric].std(axis=1) # Create plots for every metric for metric in df.columns.get_level_values(0).unique(): fig, ax = plt.subplots() start = 99999 length = len(df) step = 50000 X = list(range(start, (start+(length*step)), step)) Y = df.loc[:, (metric, 'mean')] Y = Y.rolling(window=10, center=True).mean() bars = df.loc[:, (metric, 'std')] label = metric.split('/')[-1] ax.errorbar(X, Y, yerr=bars, fmt='b', errorevery=50, label=label) ax.legend(loc='best') ax.set_xlabel('steps') ax.grid(linestyle='--', linewidth=1, alpha=0.1) out_path = os.path.join(data_dir, label) + '.pdf' fig.savefig(out_path) if __name__ == '__main__': args = sys.argv data_dir = args[1] plot(data_dir) ```
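`plot()` above smooths each logged metric with a centred rolling mean over ten points before drawing it. The same smoothing can be tried in isolation on synthetic data:
```python
import numpy as np
import pandas as pd

# Synthetic stand-in for one logged metric (e.g. episode reward per logging step).
rewards = pd.Series(np.random.default_rng(0).normal(loc=1.0, scale=0.3, size=100))

smoothed = rewards.rolling(window=10, center=True).mean()
print(smoothed.iloc[:12])  # the first few values are NaN until the window is full
```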
{ "source": "jen-soft/easy_web_crawler", "score": 2 }
#### File: jen-soft/easy_web_crawler/run_spider.py
```python
import os

from scrapy.crawler import CrawlerProcess
from scrapy.cmdline import execute, get_project_settings

from my_crawler.spiders.yts__am import YtsAmSpider

BASE_DIR = os.path.abspath(os.path.dirname(__file__))


def main():
    result_file_path = os.path.join(BASE_DIR, 'data/debug_data.json')
    if os.path.exists(result_file_path):
        os.remove(result_file_path)

    settings = get_project_settings()
    settings.update({
        'LOG_FILE': None,  # default stdout
        # 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'FEED_URI': result_file_path,
        'FEED_FORMAT': 'json',
    })

    crawler = CrawlerProcess(settings)
    spider = YtsAmSpider()
    crawler.crawl(spider)
    crawler.start()
    crawler.stop()

    spider.log('--------------------------------------------------------------')
    spider.log('file saved at {file_path}'.format(file_path=result_file_path))


if __name__ == "__main__":
    main()
```
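The script above drives Scrapy from plain Python via `CrawlerProcess` with `FEED_URI`/`FEED_FORMAT`. As a hedged sketch only: in recent Scrapy releases the `FEEDS` setting plays the same role, and the spider *class* (here a placeholder `SomeSpider`) is normally passed to `crawl()` rather than an instance.
```python
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

settings = get_project_settings()
settings.set("FEEDS", {"data/debug_data.json": {"format": "json"}})

process = CrawlerProcess(settings)
# process.crawl(SomeSpider)  # SomeSpider is a placeholder for a spider class in your project
# process.start()            # blocks until the crawl finishes
```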
{ "source": "jen-soft/tools", "score": 3 }
#### File: jen-soft/tools/p.py ```python import math print(""" # - - - p.dir(obj) # print all publick atters from object # p.dir( str, True, 7 ) """.strip()) class origin: dir = dir def chunks(l, n): for i in range(0, len(l), n): yield l[i:i + n] def dir(obj, show_private=False, count_columns=5): # get object attributes if show_private: # allow: "_name..." obj_attr = [n for n in origin.dir(obj) if not n.startswith('__')] else: obj_attr = [n for n in origin.dir(obj) if not n.startswith('_')] # split list attributes for columns obj_attr.sort() column_len = int(math.ceil(len(obj_attr) / count_columns)) columns = list(chunks(obj_attr, column_len)) for column_items in columns: dummy_items = range(0, column_len - len(column_items)) column_items.extend(['' for d in dummy_items]) # text format columns (normalize width) for i, column_items in enumerate(columns): column_width = len(max(column_items, key=len)) formatted_items = [] for item in column_items: indent = ' ' * (column_width - len(item)) formatted_items.append(item + indent) columns[i] = formatted_items # print result data for line_data in list(zip(*columns)): line = ' '.join(line_data) print(line) print('\n') import json as _json print(""" # - - - p.json(obj) # print structured dates: dict, list, set, tuple # p.json({'id': 1, 'name': 'jen'}) """.strip()) def json(data): print(_json.dumps(data, indent=2, ensure_ascii=False)) print(""" # - - - """.strip()+'\n') if __name__ == '__main__': dir(str, True) json({'id': 1, 'username': 'jen-soft'}) ```
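Assuming `p.py` sits somewhere on the import path, the two helpers it defines are used like this (the module also prints a short usage banner when imported):
```python
import p

p.dir(dict)                    # public attributes of dict, laid out in columns
p.dir(str, True, 7)            # include single-underscore attributes, seven columns
p.json({"id": 1, "tags": ["a", "b"]})
```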
{ "source": "jensonjose/utilbox", "score": 2 }
#### File: jensonjose/utilbox/setup.py ```python import setuptools from utilbox import __conf__ def read_file(file_path): with open(file_path, "r") as target_file: return target_file.read() # retrieve information from package files package_version = __conf__.config_map["version"] package_requirements = read_file("requirements.txt").splitlines() package_long_description = read_file("README.md") package_list = setuptools.find_packages(exclude=["tests"]) config = { "name": "utilbox", "description": "Collection of utility packages for Python.", "long_description": package_long_description, "author": "<NAME>", "author_email": "<EMAIL>", "license": "MIT", "platforms": ["Any"], "url": "https://github.com/jensonjose/utilbox", "version": package_version, "install_requires": package_requirements, "packages": package_list, "classifiers": ["Development Status :: 3 - Alpha", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Topic :: Software Development", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Utilities"] } setuptools.setup(**config) ``` #### File: utilbox/tests/dir_utils_tests.py ```python import types import unittest from utilbox.os_utils import DirUtils class DirUtilsTest(unittest.TestCase): """ Base class for all tests, which defined common 'setUp' and 'tearDown' methods. """ def setUp(self): """ Prepare environment to run automated tests. """ import os self.test_data_root = "test_data" self.test_dirs = [os.path.normpath(self.test_data_root + "/" + "dir1"), os.path.normpath(self.test_data_root + "/" + "dir2")] self.test_files = ["file1.txt", "file2.txt", "file3.log", "file4.log"] for test_dir in self.test_dirs: if not os.path.exists(test_dir): os.makedirs(test_dir) for test_file_name in self.test_files: test_file = open(os.path.normpath(test_dir + "/" + test_file_name), "w+") test_file.write("This is the content for " + str(test_file_name) + ".") def tearDown(self): """ Restore environment to pre-test conditions. """ import shutil shutil.rmtree(self.test_data_root) class DirUtilsTestInit(DirUtilsTest): """ Class for testing various aspects of instantiation. """ pass class DirUtilsTestSetter(DirUtilsTest): """ Class for testing various aspects of setting attributes. """ pass class DirUtilsTestMethodInput(DirUtilsTest): """ Class for testing methods taking input parameters. """ pass class DirUtilsTestMethodReturnType(DirUtilsTest): """ Class for testing return types of all methods. """ def test_get_dir_entries(self): """ Test if returned value is a list. """ test_file_extensions = ["pdf"] for test_directory in self.test_dirs: for test_file_extension in test_file_extensions: self.assertIsInstance(DirUtils.get_dir_contents(test_directory, test_file_extension), types.ListType) class DirUtilsTestMethodReturnValue(DirUtilsTest): """ Class for testing return values of all methods against known values. """ pass if __name__ == '__main__': unittest.main() ``` #### File: utilbox/mail_utils/mail_utils.py ```python import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText __author__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Alpha" class MailUtils: """ Utility class containing methods for sending of email messages. 
""" def __init__(self, smtp_server, sender_email_id): self.smtp_server = smtp_server self.sender_email_id = sender_email_id def send_mail_plain(self, recipient_email_id, email_subject, reply_to_email, message_string): """ Sends a plain-text email message. :param recipient_email_id: The email ID of the recipient. :param email_subject: The subject of the email. :param reply_to_email: The 'reply-to' email address :param message_string: The body of the email message. :return: True, if message was sent successfully, False otherwise. :rtype: bool """ # Create message container - the correct MIME type is multipart/alternative. email_message = MIMEText(message_string) email_message['Subject'] = email_subject email_message['From'] = self.sender_email_id email_message['To'] = reply_to_email try: # Send the message via local SMTP server. smtp_session = smtplib.SMTP(self.smtp_server) # sendmail function takes 3 arguments: sender's address, recipient's address # and message to send - here it is sent as one string. smtp_session.sendmail(self.sender_email_id, recipient_email_id, email_message.as_string()) smtp_session.quit() return True except Exception as ex: import traceback traceback.format_exc() return False @staticmethod def _create_html_message(plain_message_string): """ Internal method to convert plain-text message string to HTML. :param plain_message_string: The message string to converted to HTML. :return: The HTML-based message string. :rtype: str """ return "<html><head></head><body><p>" + str(plain_message_string) + "</p></body></html>" def send_mail_html(self, recipient_email_id, email_subject, reply_to_email, message_string): """ Sends an HTML-format email message. :param recipient_email_id: The email ID of the recipient. :param email_subject: The subject of the email. :param reply_to_email: The 'reply-to' email address :param message_string: The body of the email message. :return: True, if message was sent successfully, False otherwise. :rtype: bool """ # Create message container - the correct MIME type is multipart/alternative. email_message = MIMEMultipart('alternative') email_message['Subject'] = email_subject email_message['From'] = self.sender_email_id email_message['To'] = reply_to_email # Create the body of the message (a plain-text and an HTML version). text = message_string html = self._create_html_message(message_string) # Record the MIME types of both parts - text/plain and text/html. part1 = MIMEText(text, 'plain') part2 = MIMEText(html, 'html') # Attach parts into message container. # According to RFC 2046, the last part of a multipart message, in this case # the HTML message, is best and preferred. email_message.attach(part1) email_message.attach(part2) try: # Send the message via local SMTP server. smtp_session = smtplib.SMTP(self.smtp_server) # sendmail function takes 3 arguments: sender's address, recipient's address # and message to send - here it is sent as one string. smtp_session.sendmail(self.sender_email_id, recipient_email_id, email_message.as_string()) smtp_session.quit() return True except Exception as ex: import traceback traceback.format_exc() return False ``` #### File: utilbox/markup_utils/html_utils.py ```python __author__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Alpha" class HtmlUtils: """ Utility class containing methods to manipulate HTML files. 
""" def __init__(self): pass ``` #### File: utilbox/os_utils/file_utils.py ```python import os import csv import types import shutil import datetime __author__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Alpha" class FileUtils: """ Utility class containing methods to manipulate files. """ def __init__(self): pass @staticmethod def check_valid_file(file_path): """ Checks if the given path points to a file or not. :param file_path: The full path of the file to be verified. :return: True, if specified path points to a valid file, False otherwise. :rtype: bool """ if os.path.isfile(file_path): return True raise False @staticmethod def get_file_extension(file_path): """ Gets extension of the file at the specified path. :param file_path: The full path of the file whose extension is to be extracted. :return: The file extension. :rtype: str """ return os.path.splitext(file_path)[1] @staticmethod def read_file(file_path): """ Reads a file from the specified path and returns list containing its lines. :param file_path: The full path of the file to be read. :return: List representing each line of the file. :rtype: list """ try: file_handler = open(file_path, "r") file_data = file_handler.readlines() file_handler.close() return file_data except Exception as ex: pass @staticmethod def get_file_last_line(file_path): """ Returns the last line of the specified file. :param file_path: The full path of the file to be read. :return: The last of the file. False in case empty/non-existent file is encountered. :rtype: str """ if FileUtils.check_valid_file(file_path): if FileUtils.get_file_metadata(file_path)["FILE_SIZE"] > 0: file_lines = FileUtils.read_file(file_path) return file_lines[len(file_lines)-1] return False @staticmethod def write_to_file(file_path, data, write_mode="a"): """ Writes supplied data to file at specified path. :param file_path: The full path of the file where data is to be written. :param data: The data to be written into the file. :param write_mode: The file write mode: w->Create new file, a->Append to existing file. The default write mode is 'append'. :return: True, if file write was successful, False otherwise. :rtype: bool """ try: file_path = os.path.normpath(file_path) file_handle = open(file_path, write_mode) file_handle.write(data) file_handle.close() return True except IOError as ex: return False @staticmethod def create_file_name_string(name_components, extension, separator="_"): """ Combines multiple components provided in a list with a file extension string to create a complete file name. Main use is to keep the actual code clutter-free. :param name_components: List of components to be part of the final file name. :param extension: Extension of the file. :param separator: Separator to be used for isolating name components. :return: The final file name string, if name_components is a valid list, False otherwise. :rtype: str """ if isinstance(name_components, types.ListType): if len(name_components) > 0: name_string = "" for component in name_components: name_string += component + separator name_string = name_string[:-1] name_string += extension return name_string return False @staticmethod def copy_file(src_path, dest_path): """ Copies a file from supplied source to destination path. :param src_path: The full path of the file source. :param dest_path: The full path of the file destination. :return: True, if copy was successful, False otherwise. 
:rtype: bool """ try: shutil.copy(src_path, dest_path) return True except Exception as ex: return False @staticmethod def load_csv(csv_file_path): """ Reads a CSV file and returns a list of rows, each containing header-value pairs. :param csv_file_path: The full path of the CSV file. :return: The list of CSV rows, as header-value pair dictionaries, False in case of Exception. :rtype: list """ try: csv_reader = csv.DictReader(open(csv_file_path)) return csv_reader except: return False @staticmethod def get_file_metadata(file_path, size_unit="k", time_format="%Y-%m-%d %I:%M:%S"): """ Returns file meta-data containing, - Last modified time - File size - File name - File parent directory - File full path - File extension :param file_path: The full path of the file to be analyzed. :param size_unit: Units in which to report file size. :param time_format: Format in which to report file modification time. :return: Dictionary containing relevant file meta data. :rtype: dict """ if FileUtils.check_valid_file(file_path): last_modified_time = datetime.datetime.fromtimestamp(os.path.getmtime(file_path)).strftime(time_format) # get file size in bytes file_size = os.path.getsize(file_path) base_unit = 1024.0 decimal_limit = 2 if size_unit == "b": pass elif size_unit == "k": file_size /= base_unit elif size_unit == "m": file_size = (file_size / base_unit) / base_unit elif size_unit == "g": file_size = ((file_size / base_unit) / base_unit) / base_unit # limit floating-point value to X decimal points if size_unit != "b": file_size = round(file_size, decimal_limit) return {"LAST_MODIFIED": str(last_modified_time), "SIZE": str(file_size), "NAME": str(os.path.basename(file_path)), "PARENT_DIRECTORY": str(os.path.dirname(file_path)), "FULL_PATH": str(file_path), "EXTENSION": FileUtils.get_file_extension(file_path)} return False @staticmethod def get_file_directory(file_path): """ Returns the directory path of the specified file. :param file_path: The full path of the file. :return: The directory path of the file. :rtype: str """ if FileUtils.check_valid_file(file_path): return str(os.path.dirname(file_path)) return False @staticmethod def detect_file_type(): """ Detects the file type and returns the identified type string. """ pass ```
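A short usage sketch for the `FileUtils` helpers above. It assumes the package is installed so that `from utilbox.os_utils import FileUtils` works, mirroring the `DirUtils` import used in the tests; the temporary file exists only for the demonstration.
```python
import os
import tempfile

from utilbox.os_utils import FileUtils  # assumed to be exported like DirUtils in the tests

with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as handle:
    handle.write("first line\nlast line\n")
    path = handle.name

print(FileUtils.check_valid_file(path))            # True
print(FileUtils.get_file_last_line(path))          # "last line\n"
print(FileUtils.get_file_metadata(path)["SIZE"])   # file size in KB, as a string
os.remove(path)
```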
{ "source": "jenson-shi/puship", "score": 2 }
#### File: jenson-shi/puship/puship.py ```python import subprocess try: # for py3 from urllib.request import urlopen, Request, HTTPError, URLError except: # for py2 from urllib2 import urlopen, Request, HTTPError, URLError try: # for py3 from urllib.parse import urlencode except: # for py2 from urllib import urlencode pushover_token = 'APP_TOKEN' pushover_user = 'USER_KEY' def _url_read(url, postdata=None, method=None): result = None if not postdata is None: postdata = urlencode(postdata).encode() try: req = Request(url, data=postdata) req.add_header('User-Agent', 'PushIP/1.1 (<EMAIL>)') req.add_header('Content-type', 'application/x-www-form-urlencoded') if not method is None: req.get_method = lambda: method urlItem = urlopen(req, timeout=10) result = urlItem.read() urlItem.close() except URLError as e: print('URLError: {0}'.format(e.reason)) except HTTPError as e: print('HTTPError: {0}'.format(e.reason)) except Exception as e: print('FetchError: HTTP data fetch error: {0}'.format(e)) return result def _command(cmd_str): child = subprocess.Popen(cmd_str, stdout = subprocess.PIPE, shell = True) return child.communicate()[0] def _get_hostname(): return _command('cat /etc/hostname').replace('\n', '') def _get_lanip(): return _command('hostname -I').replace('\n', '') def _get_wanip(): myip = _url_read('https://shixf.com/api/getip') if myip is None: return '0.0.0.0' return myip def get_message(): hostname = _get_hostname() lanip = _get_lanip() wanip = _get_wanip() return ('Hostname: {0}\n' 'LanIP: {1}\n' 'WanIP: {2}').format(hostname, lanip, wanip) def pushover(msg): return _url_read('https://api.pushover.net/1/messages.json', postdata = { 'token': pushover_token, 'user': pushover_user, 'message': msg }) if __name__ == '__main__': pushover(get_message()) ```
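`pushover()` above posts a URL-encoded form to the Pushover API. The request body it builds can be inspected offline; the token and user key are the same placeholders used in the script, and the message text is an example of what `get_message()` produces.
```python
try:  # py3
    from urllib.parse import urlencode
except ImportError:  # py2
    from urllib import urlencode

# Example values standing in for the output of get_message().
body = urlencode({
    "token": "APP_TOKEN",
    "user": "USER_KEY",
    "message": "Hostname: raspberrypi\nLanIP: 192.168.1.10\nWanIP: 0.0.0.0",
})
print(body)  # token=APP_TOKEN&user=USER_KEY&message=Hostname%3A+raspberrypi...
```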