#!/usr/bin/env python3 # -*- coding: UTF-8 -*- from Scripts.shared_imports import * from Scripts.utils import choice from datetime import datetime, date, timedelta from configparser import ConfigParser from pkg_resources import parse_version from random import randrange from shutil import copyfile from itertools import islice import io import json import requests import zipfile import time import hashlib import pathlib import pickle ########################### Check Lists Updates ########################### def check_lists_update(spamListDict, silentCheck = False): SpamListFolder = spamListDict['Meta']['SpamListFolder'] currentListVersion = spamListDict['Meta']['VersionInfo']['LatestLocalVersion'] def update_last_checked(): currentDate = datetime.today().strftime('%Y.%m.%d.%H.%M') #Update Dictionary with latest release gotten from API spamListDict['Meta']['VersionInfo'].update({'LatestLocalVersion': latestRelease}) spamListDict['Meta']['VersionInfo'].update({'LastChecked': currentDate}) # Prepare data for json file update, so only have to check once a day automatically newJsonContents = json.dumps({'LatestRelease': latestRelease, 'LastChecked' : currentDate}) with open(spamListDict['Meta']['VersionInfo']['Path'], 'w', encoding="utf-8") as file: json.dump(newJsonContents, file, indent=4) if silentCheck == False: print("\nChecking for updates to spam lists...") if os.path.isdir(SpamListFolder): pass else: try: os.mkdir(SpamListFolder) except: print("Error: Could not create folder. Try creating a folder called 'spam_lists' to update the spam lists.") try: response = requests.get("https://api.github.com/repos/ThioJoe/YT-Spam-Domains-List/releases/latest") if response.status_code != 200: if response.status_code == 403: if silentCheck == False: print(f"\n{B.RED}{F.WHITE}Error [U-4L]:{S.R} Got an 403 (ratelimit_reached) when attempting to check for spam list update.") print(f"This means you have been {F.YELLOW}rate limited by github.com{S.R}. Please try again in a while.\n") return False else: return spamListDict else: if silentCheck == False: print(f"{B.RED}{F.WHITE}Error [U-3L]:{S.R} Got non 200 status code (got: {response.status_code}) when attempting to check for spam list update.\n") print(f"If this keeps happening, you may want to report the issue here: https://github.com/ThioJoe/YT-Spammer-Purge/issues") if silentCheck == False: return False else: return spamListDict latestRelease = response.json()["tag_name"] except OSError as ox: if silentCheck == True: return spamListDict else: if "WinError 10013" in str(ox): print(f"{B.RED}{F.WHITE}WinError 10013:{S.R} The OS blocked the connection to GitHub. Check your firewall settings.\n") return False except: if silentCheck == True: return spamListDict else: print("Error: Could not get latest release info from GitHub. Please try again later.") return False # If update available if currentListVersion == None or (parse_version(latestRelease) > parse_version(currentListVersion)): print("\n> A new spam list update is available. 
Downloading...") fileName = response.json()["assets"][0]['name'] total_size_in_bytes = response.json()["assets"][0]['size'] downloadFilePath = SpamListFolder + fileName downloadURL = response.json()["assets"][0]['browser_download_url'] filedownload = getRemoteFile(downloadURL, stream=True) # These headers required to get correct file size block_size = 1048576 #1 MiB in bytes with open(downloadFilePath, 'wb') as file: for data in filedownload.iter_content(block_size): file.write(data) if os.stat(downloadFilePath).st_size == total_size_in_bytes: # Unzip files into folder and delete zip file attempts = 0 print("Extracting updated lists...") # While loop continues until file no longer exists, or too many errors while True: try: attempts += 1 time.sleep(0.5) with zipfile.ZipFile(downloadFilePath,"r") as zip_ref: zip_ref.extractall(SpamListFolder) os.remove(downloadFilePath) except PermissionError as e: if attempts <= 10: continue else: traceback.print_exc() print(f"\n> {F.RED}Error:{S.R} The zip file containing the spam lists was downloaded, but there was a problem extracting the files because of a permission error. ") print(f"This can happen if an antivirus takes a while to scan the file. You may need to manually extract the zip file.") input("\nPress enter to Continue anyway...") break # THIS MEANS SUCCESS, the zip file was deleted after extracting, so returns except FileNotFoundError: update_last_checked() return spamListDict elif total_size_in_bytes != 0 and os.stat(downloadFilePath).st_size != total_size_in_bytes: os.remove(downloadFilePath) print(f" > {F.RED} File did not fully download. Please try again later.\n") return spamListDict else: update_last_checked() return spamListDict ############################# Check For App Update ############################## def check_for_update(currentVersion, updateReleaseChannel, silentCheck=False): isUpdateAvailable = False print("\nGetting info about latest updates...") try: if updateReleaseChannel == "stable": response = requests.get("https://api.github.com/repos/ThioJoe/YT-Spammer-Purge/releases/latest") elif updateReleaseChannel == "all": response = requests.get("https://api.github.com/repos/ThioJoe/YT-Spammer-Purge/releases") if response.status_code != 200: if response.status_code == 403: if silentCheck == False: print(f"\n{B.RED}{F.WHITE}Error [U-4]:{S.R} Got an 403 (ratelimit_reached) when attempting to check for update.") print(f"This means you have been {F.YELLOW}rate limited by github.com{S.R}. Please try again in a while.\n") else: print(f"\n{B.RED}{F.WHITE}Error [U-4]:{S.R} Got an 403 (ratelimit_reached) when attempting to check for update.") return None else: if silentCheck == False: print(f"{B.RED}{F.WHITE}Error [U-3]:{S.R} Got non 200 status code (got: {response.status_code}) when attempting to check for update.\n") print(f"If this keeps happening, you may want to report the issue here: https://github.com/ThioJoe/YT-Spammer-Purge/issues") else: print(f"{B.RED}{F.WHITE}Error [U-3]:{S.R} Got non 200 status code (got: {response.status_code}) when attempting to check for update.\n") return None else: # assume 200 response (good) if updateReleaseChannel == "stable": latestVersion = response.json()["name"] isBeta = False elif updateReleaseChannel == "all": latestVersion = response.json()[0]["name"] isBeta = response.json()[0]["prerelease"] except OSError as ox: if "WinError 10013" in str(ox): print(f"{B.RED}{F.WHITE}WinError 10013:{S.R} The OS blocked the connection to GitHub. 
Check your firewall settings.\n") else: print(f"{B.RED}{F.WHITE}Unknown OSError{S.R} Error occurred while checking for updates\n") return None except Exception as e: if silentCheck == False: print(e + "\n") print(f"{B.RED}{F.WHITE}Error [Code U-1]:{S.R} Problem while checking for updates. See above error for more details.\n") print("If this keeps happening, you may want to report the issue here: https://github.com/ThioJoe/YT-Spammer-Purge/issues") elif silentCheck == True: print(f"{B.RED}{F.WHITE}Error [Code U-1]:{S.R} Unknown problem while checking for updates. See above error for more details.\n") return None if parse_version(latestVersion) > parse_version(currentVersion): if isBeta == True: isUpdateAvailable = "beta" else: isUpdateAvailable = True if silentCheck == False: print("------------------------------------------------------------------------------------------") if isBeta == True: print(f" {F.YELLOW}A new {F.LIGHTGREEN_EX}beta{F.YELLOW} version{S.R} is available!") else: print(f" A {F.LIGHTGREEN_EX}new version{S.R} is available!") print(f" > Current Version: {currentVersion}") print(f" > Latest Version: {F.LIGHTGREEN_EX}{latestVersion}{S.R}") print("(To stop receiving beta releases, change the 'release_channel' setting in the config file)") print("------------------------------------------------------------------------------------------") userChoice = choice("Update Now?") if userChoice == True: if sys.platform == 'win32' or sys.platform == 'win64': print(f"\n> {F.LIGHTCYAN_EX} Downloading Latest Version...{S.R}") if updateReleaseChannel == "stable": jsondata = json.dumps(response.json()["assets"]) elif updateReleaseChannel == "all": jsondata = json.dumps(response.json()[0]["assets"]) dict_json = json.loads(jsondata) # Get files in release, get exe and hash info i,j,k = 0,0,0 # i = index of all, j = index of exe, k = index of hash for asset in dict_json: i+=1 name = str(asset['name']) if '.exe' in name.lower(): filedownload = requests.get(dict_json[0]['browser_download_url'], stream=True) j+=1 # Count number of exe files in release, in case future has multiple exe's, can cause warning if '.sha256' in name.lower(): #First removes .sha256 file extension, then removes all non-alphanumeric characters downloadHashSHA256 = re.sub(r'[^a-zA-Z0-9]', '', name.lower().replace('.sha256', '')) k += 1 ignoreHash = False # Validate Retrieved Info if j > 1: # More than one exe file in release print(f"{F.YELLOW}Warning!{S.R} Multiple exe files found in release. You must be updating from the future when that was not anticipated.") print("You should instead manually download the latest version from: https://github.com/ThioJoe/YT-Spammer-Purge/releases") print("You can try continuing anyway, but it might not be successful, or might download the wrong exe file.") input("\nPress enter to continue...") elif j == 0: # No exe file in release print(f"{F.LIGHTRED_EX}Warning!{S.R} No exe file found in release. You'll have to manually download the latest version from:") print("https://github.com/ThioJoe/YT-Spammer-Purge/releases") return False if k == 0: # No hash file in release print(f"{F.YELLOW}Warning!{S.R} No verification sha256 hash found in release. If download fails, you can manually download latest version here:") print("https://github.com/ThioJoe/YT-Spammer-Purge/releases") input("\nPress Enter to try to continue...") ignoreHash = True elif k>0 and k!=j: print(f"{F.YELLOW}Warning!{S.R} Too many or too few sha256 files found in release. 
If download fails, you should manually download latest version here:") print("https://github.com/ThioJoe/YT-Spammer-Purge/releases") input("\nPress Enter to try to continue...") # Get and Set Download Info total_size_in_bytes= int(filedownload.headers.get('content-length', 0)) block_size = 1048576 #1 MiB in bytes downloadFileName = dict_json[0]['name'] # Check if file exists already, ask to overwrite if it does if os.path.exists(downloadFileName): print(f"\n{B.RED}{F.WHITE} WARNING! {S.R} '{F.YELLOW}{downloadFileName}{S.R}' file already exists. This would overwrite the existing file.") confirm = choice("Overwrite this existing file?") if confirm == True: try: os.remove(downloadFileName) except: traceback.print_exc() print(f"\n{F.LIGHTRED_EX}Error F-6:{S.R} Problem deleting existing existing file! Check if it's gone, or delete it yourself, then try again.") print("The info above may help if it's a bug, which you can report here: https://github.com/ThioJoe/YT-Spammer-Purge/issues") input("Press enter to Exit...") sys.exit() elif confirm == False or confirm == None: return False # Download File with open(downloadFileName, 'wb') as file: numProgressBars = 30 for data in filedownload.iter_content(block_size): progress = os.stat(downloadFileName).st_size/total_size_in_bytes * numProgressBars print(f"{F.LIGHTGREEN_EX}<[{F.LIGHTCYAN_EX}" + '='*round(progress) + ' '*(numProgressBars-round(progress)) + f"{F.LIGHTGREEN_EX}]>{S.R}\r", end="") #Print Progress bar file.write(data) print(f"\n> {F.LIGHTCYAN_EX}Verifying Download Integrity...{S.R} ") # Verify Download Size if os.stat(downloadFileName).st_size == total_size_in_bytes: pass elif total_size_in_bytes != 0 and os.stat(downloadFileName).st_size != total_size_in_bytes: os.remove(downloadFileName) print(f"\n> {F.RED} File did not fully download. Please try again later.") return False elif total_size_in_bytes == 0: print("Something is wrong with the download on the remote end. You should manually download latest version here:") print("https://github.com/ThioJoe/YT-Spammer-Purge/releases") # Verify hash if ignoreHash == False: if downloadHashSHA256 == hashlib.sha256(open(downloadFileName, 'rb').read()).hexdigest().lower(): pass else: os.remove(downloadFileName) print(f"\n> {F.RED} Hash did not match. Please try again later.") print("Or download the latest version manually from here: https://github.com/ThioJoe/YT-Spammer-Purge/releases") return False # Print Success print(f"\n > Download Completed: {F.LIGHTGREEN_EX}{downloadFileName}{S.R}") if isBeta == False: print("\nYou can now delete the old version. (Or keep it around in case you encounter any issues with the new version)") else: print(f"\n{F.LIGHTYELLOW_EX}NOTE:{S.R} Because this is a {F.CYAN}beta release{S.R}, you should keep the old version around in case you encounter any issues") print(f" > And don't forget to report any problems you encounter here: {F.YELLOW}TJoe.io/bug-report{S.R}") input("\nPress Enter to Exit...") sys.exit() else: # We do this because we pull the .exe for windows, but maybe we could use os.system('git pull')? Because this is a GIT repo, unlike the windows version print(f"> {F.RED} Error:{S.R} You are using an unsupported OS for the autoupdater (macos/linux). \n This updater only supports Windows (right now). 
Feel free to get the files from github: https://github.com/ThioJoe/YT-Spammer-Purge") return False elif userChoice == "False" or userChoice == None: return False elif silentCheck == True: return isUpdateAvailable elif parse_version(latestVersion) == parse_version(currentVersion): if silentCheck == False: print(f"\nYou have the {F.LIGHTGREEN_EX}latest{S.R} version: {F.LIGHTGREEN_EX}" + currentVersion) return False else: if silentCheck == False: print("\nNo newer release available - Your Version: " + currentVersion + " -- Latest Version: " + latestVersion) return False ######################### Try To Get Remote File ########################## def getRemoteFile(url, stream, silent=False, headers=None): try: if stream == False: response = requests.get(url, headers=headers) elif stream == True: response = requests.get(url, headers=headers, stream=True) if response.status_code != 200: if silent == False: print("Error fetching remote file or resource: " + url) print("Response Code: " + str(response.status_code)) else: return response except Exception as e: if silent == False: print(e + "\n") print(f"{B.RED}{F.WHITE} Error {S.R} While Fetching Remote File or Resource: " + url) print("See above messages for details.\n") print("If this keeps happening, you may want to report the issue here: https://github.com/ThioJoe/YT-Spammer-Purge/issues") return None ############################# Load a Config File ############################## # Put config settings into dictionary def load_config_file(configVersion=None, forceDefault=False, skipConfigChoice=False, configFileName="SpamPurgeConfig.ini"): configDict = {} def default_config_path(relative_path): if hasattr(sys, '_MEIPASS'): # If running as a pyinstaller bundle return os.path.join(sys._MEIPASS, relative_path) return os.path.join(os.path.abspath("assets"), relative_path) # If running as script, specifies resource folder as /assets # If user config file exists, keep path. Otherwise use default config file path if os.path.exists(configFileName) and forceDefault == False: default = False configFileNameWithPath = str(configFileName) else: configFileNameWithPath = default_config_path("default_config.ini") default = True # Load Contents of config file try: with open(configFileNameWithPath, 'r', encoding="utf-8") as configFile: configData = configFile.read() configFile.close() except: traceback.print_exc() print(f"{B.RED}{F.WHITE}Error Code: F-4{S.R} - Config file found, but there was a problem loading it! 
The info above may help if it's a bug.") print("\nYou can manually delete SpamPurgeConfig.ini and use the program to create a new default config.") input("Press enter to Exit...") sys.exit() # Sanitize config Data by removing quotes configData = configData.replace("\'", "") configData = configData.replace("\"", "") # Converts string from config file, wraps it to make it behave like file so it can be read by parser # Must use .read_file, .read doesn't work wrappedConfigData = io.StringIO(configData) parser = ConfigParser() parser.read_file(wrappedConfigData) # Convert raw config dictionary into easier to use dictionary settingsToKeepCase = ["your_channel_id", "videos_to_scan", "channel_ids_to_filter", "regex_to_filter", "channel_to_scan", "log_path", "this_config_description"] validWordVars = ['ask', 'mine', 'default'] for section in parser.sections(): for setting in parser.items(section): # Setting[0] is name of the setting, Setting[1] is the value of the setting if setting[0] in settingsToKeepCase and setting[1].lower() not in validWordVars: configDict[setting[0]] = setting[1] else: # Take values out of raw dictionary structure and put into easy dictionary with processed values configDict[setting[0]] = setting[1].lower() if setting[1].lower() == "false": configDict[setting[0]] = False elif setting[1].lower() == "true": configDict[setting[0]] = True # Prevent prompt about config file if it's the default config file if default == True: configDict['use_this_config'] = True # ---------------------------------------------------------------------------------------------------------------------- # Check if config out of date, update, ask to use config or not else: if configDict['use_this_config'] == False: configDict = load_config_file(forceDefault = True) elif configDict['use_this_config'] == 'ask' or configDict['use_this_config'] == True: if configVersion != None: configDict = check_update_config_file(configVersion, configDict, configFileName) if configDict['use_this_config'] == True or skipConfigChoice == True: pass else: configDict = choose_config_file(configDict, configVersion) else: print("Error C-1: Invalid value in config file for setting 'use_this_config' - Must be 'True', 'False', or 'Ask'") input("Press Enter to exit...") sys.exit() return configDict ############################# Check for Config Update ############################## def check_update_config_file(newVersion, existingConfig, configFileName): backupDestinationFolder = os.path.join(RESOURCES_FOLDER_NAME, "User_Config_Backups") try: existingConfigVersion = int(existingConfig['config_version']) if existingConfigVersion < newVersion: configOutOfDate = True else: configOutOfDate = False except: configOutOfDate = True if configOutOfDate == True: print(f"\n{F.YELLOW} WARNING! {S.R} Your config file is {F.YELLOW}out of date{S.R}. ") print(f" > Program will {F.LIGHTGREEN_EX}update your config{S.R} now, {F.LIGHTGREEN_EX}back up the old file{S.R}, and {F.LIGHTGREEN_EX}copy your settings over{S.R})") input("\nPress Enter to update config file...") else: return existingConfig # If user config file exists, keep path. 
Otherwise use default config file path if os.path.exists(configFileName): pass else: print("No existing config file found!") return False # Load data of old config file with open(configFileName, 'r', encoding="utf-8") as oldFile: oldConfigData = oldFile.readlines() oldFile.close() # Rename config to backup and copy to backup folder if not os.path.exists(backupDestinationFolder): os.mkdir(backupDestinationFolder) backupConfigFileName = f"{configFileName}.backup_v{existingConfigVersion}" backupNameAndPath = os.path.join(backupDestinationFolder, backupConfigFileName) if os.path.isfile(backupNameAndPath): print("Existing backup config file found. Random number will be added to new backup file name.") while os.path.isfile(backupNameAndPath): backupConfigFileName = backupConfigFileName + "_" + str(randrange(999)) backupNameAndPath = os.path.join(backupDestinationFolder, backupConfigFileName) # Attempt to copy backup to backup folder, otherwise just rename try: copyfile(configFileName, os.path.abspath(backupNameAndPath)) print(f"\nOld config file renamed to {F.CYAN}{backupConfigFileName}{S.R} and placed in {F.CYAN}{backupDestinationFolder}{S.R}") except: os.rename(configFileName, backupConfigFileName) print(f"\nOld config file renamed to {F.CYAN}{backupConfigFileName}{S.R}. Note: Backup file could not be moved to backup folder, so it was just renamed.") # Creates new config file from default create_config_file(updating=True, configFileName=configFileName) try: with open(configFileName, 'r', encoding="utf-8") as newFile: newConfigData = newFile.readlines() newDataList = [] # Go through all new config lines for newLine in newConfigData: if not newLine.strip().startswith('#') and not newLine.strip()=="" and "version" not in newLine: for setting in existingConfig.keys(): # Check if any old settings are in new config file if newLine.startswith(setting): for oldLine in oldConfigData: if not oldLine.strip().startswith('#') and not oldLine.strip()=="" and "version" not in oldLine: # Sets new line to be the old line if oldLine.startswith(setting): newLine = oldLine break break # The new config file writes itself again, but with the modified newLine's newDataList.append(newLine) success = False attempts = 0 while success == False: try: attempts += 1 with open(configFileName, "w", encoding="utf-8") as newFile: newFile.writelines(newDataList) success = True except PermissionError: if attempts < 3: print(f"\n{F.YELLOW}\nERROR!{S.R} Cannot write to {F.LIGHTCYAN_EX}{configFileName}{S.R}. Is it open? Try {F.YELLOW}closing the file{S.R} before continuing.") input("\n Press Enter to Try Again...") else: print(f"{F.LIGHTRED_EX}\nERROR! Still cannot write to {F.LIGHTCYAN_EX}{configFileName}{F.LIGHTRED_EX}. {F.YELLOW}Try again?{S.R} (Y) or {F.YELLOW}Skip Updating Config (May Cause Errors)?{S.R} (N)") if choice("Choice:") == False: break return load_config_file(configVersion=None, skipConfigChoice=True, configFileName=configFileName) except: traceback.print_exc() print("--------------------------------------------------------------------------------") print("Something went wrong when copying your config settings. 
You'll have to manually copy them from backup.") input("\nPress Enter to exit...") sys.exit() ############################# Get List of Files Matching Regex ############################## def list_config_files(relativePath=None): configNumExpression = r'(?<=spampurgeconfig)(\d+?)(?=\.ini)' fileList = list() if relativePath == None: path = os.getcwd() else: path = os.path.abspath(relativePath) for file in os.listdir(path): try: match = re.search(configNumExpression, file.lower()).group(0) # Only exact matches, no backups if file.lower() == "spampurgeconfig" + match + ".ini": fileList.append(file) except AttributeError as ax: if "NoneType" in str(ax): pass else: traceback.print_exc() print("--------------------------------------------------------------------------------") print("Something went wrong when getting list of config files. Check your regex.") input("\nPress Enter to exit...") sys.exit() return fileList ############################# Ask to use Config or Which One ############################## # Applies if not using default config, and if not set to 'not use' config def choose_config_file(configDict, newestConfigVersion): configNumExpression = r'(?<=spampurgeconfig)(\d+?)(?=\.ini)' configFileList = list_config_files() # If only one config file exists, prompt to use if len(configFileList) == 0: if choice(f"\nFound {F.YELLOW}config file{S.R}, use those settings?") == False: return load_config_file(forceDefault=True) else: return configDict # If more than one config exists, list and ask which if len(configFileList) > 0: configChoiceDict = {} print(f"\n=================== Found Multiple Config Files ===================") if os.path.exists("SpamPurgeConfig.ini"): print(f"\n{F.YELLOW}------------- Use primary config file or another one? -------------{S.R}") print(F" {F.LIGHTCYAN_EX}Y:{S.R} Use primary config file") print(F" {F.LIGHTCYAN_EX}N:{S.R} Use default settings, don't load any config") print(f"\n{F.YELLOW}------------------ Other Available Config Files -------------------{S.R}") else: print("\n Available Config Files:") # Print Available Configs, and add to dictionary for file in configFileList: configNum = re.search(configNumExpression, file.lower()).group(0) configDescription = load_config_file(configFileName=file, skipConfigChoice=True)['this_config_description'] configChoiceDict[configNum] = file print(f" {F.LIGHTCYAN_EX}{configNum}:{S.R} {configDescription}") valid = False while valid == False: configChoice = input("\n Config Choice (Y/N or #): ") if configChoice.lower() == "y": return configDict elif configChoice.lower() == "n": return load_config_file(forceDefault=True) elif configChoice.lower() == "" or configChoice.lower() not in configChoiceDict.keys(): print(f"\n{F.YELLOW} Invalid Choice! 
Please enter a valid choice.{S.R}") else: # Load an available config, update it, then return it chosenConfigDict = load_config_file(skipConfigChoice=True, configFileName=configChoiceDict[configChoice]) chosenConfigDict = check_update_config_file(newestConfigVersion, chosenConfigDict, configChoiceDict[configChoice]) return load_config_file(skipConfigChoice=True, configFileName=configChoiceDict[configChoice]) ############################# Ingest Other Files ############################## def ingest_asset_file(fileName): def assetFilesPath(relative_path): if hasattr(sys, '_MEIPASS'): # If running as a pyinstaller bundle return os.path.join(sys._MEIPASS, relative_path) return os.path.join(os.path.abspath("assets"), relative_path) # If running as script, specifies resource folder as /assets # Open list of root zone domain extensions with open(assetFilesPath(fileName), 'r', encoding="utf-8") as file: data = file.readlines() dataList = [] for line in data: if not line.strip().startswith('#'): line = line.strip() dataList.append(line.lower()) return dataList def copy_asset_file(fileName, destination): def assetFilesPath(relative_path): if hasattr(sys, '_MEIPASS'): # If running as a pyinstaller bundle return os.path.join(sys._MEIPASS, relative_path) return os.path.join(os.path.abspath("assets"), relative_path) # If running as script, specifies resource folder as /assets copyfile(assetFilesPath(fileName), os.path.abspath(destination)) def ingest_list_file(relativeFilePath, keepCase = True): if os.path.exists(relativeFilePath): with open(relativeFilePath, 'r', encoding="utf-8") as listFile: # If file doesn't end with newline, add one listData = listFile.readlines() lastline = listData[-1] with open(relativeFilePath, 'a', encoding="utf-8") as listFile: if not lastline.endswith('\n'): listFile.write('\n') processedList = [] for line in listData: line = line.strip() if not line.startswith('#') and line !="": if keepCase == False: processedList.append(line.lower()) else: processedList.append(line) return processedList else: return None def get_list_file_version(relativeFilePath): if os.path.exists(relativeFilePath): matchBetweenBrackets = '(?<=\[)(.*?)(?=\])' # Matches text between first set of two square brackets with open(relativeFilePath, 'r', encoding="utf-8") as file: for line in islice(file, 0, 5): try: matchItem = re.search(matchBetweenBrackets, line) if matchItem: listVersion = str(matchItem.group(0)) except AttributeError: pass return listVersion else: return None ############################# CONFIG FILE FUNCTIONS ############################## def create_config_file(updating=False, dontWarn=False, configFileName="SpamPurgeConfig.ini"): def config_path(relative_path): if hasattr(sys, '_MEIPASS'): # If running as a pyinstaller bundle return os.path.join(sys._MEIPASS, relative_path) return os.path.join(os.path.abspath("assets"), relative_path) # If running as script, specifies resource folder as /assets if os.path.exists(configFileName): if updating == False and dontWarn == False: # First get list of existing secondary config files, to know what to name the new one configNumExpression = r'(?<=spampurgeconfig)(\d+?)(?=\.ini)' configFileList = list_config_files() if len(configFileList) > 0: configNumList = list() for file in configFileList: configNum = re.search(configNumExpression, file.lower()).group(0) configNumList.append(int(configNum)) newConfigNum = max(configNumList)+1 else: newConfigNum = 2 print("-------------------------------------------------------------------------------------") 
print(f"\nConfig File {F.YELLOW}{configFileName}{S.R} already exists. You can {F.LIGHTCYAN_EX}reset it to default{S.R}, or {F.LIGHTCYAN_EX}create another secondary config{S.R}.") print("\nWhat do you want to do?") print(f" 1: {F.LIGHTRED_EX}Reset{S.R} main config ({F.LIGHTRED_EX}{configFileName}{S.R}) to fresh default config") print(f" 2: {F.YELLOW}Create{S.R} another secondary config file (SpamPurgeConfig{F.YELLOW}{newConfigNum}{S.R}.ini)") userChoice = input("\n Choose (1/2): ") if userChoice.lower() == "x": return "MainMenu" elif userChoice == "1": # Removes existing file to make room for fresh default config try: os.remove(configFileName) except: traceback.print_exc() print("Error Code F-1: Problem deleting existing existing file! Check if it's gone. The info above may help if it's a bug.") print("If this keeps happening, you may want to report the issue here: https://github.com/ThioJoe/YT-Spammer-Purge/issues") input("Press enter to Exit...") sys.exit() elif userChoice == "2": configFileName = f"SpamPurgeConfig{newConfigNum}.ini" input(f"\nPress Enter to create additional config file: {F.YELLOW}{configFileName}{S.R}") # Creates fresh new config file # Get default config file contents try: with open(config_path('default_config.ini'), 'r', encoding="utf-8") as defaultConfigFile: data = defaultConfigFile.read() defaultConfigFile.close() except: traceback.print_exc() print(f"{B.RED}{F.WHITE}Error Code: F-2{S.R} - Problem reading default config file! The info above may help if it's a bug.") input("Press enter to Exit...") sys.exit() # Create config file attempts = 0 success = False while success == False: try: attempts += 1 with open(configFileName, "w", encoding="utf-8") as configFile: configFile.write(data) configFile.close() success = True except PermissionError: if attempts < 3: print(f"\n{F.YELLOW}\nERROR!{S.R} Cannot write to {F.LIGHTCYAN_EX}{configFileName}{S.R}. Is it open? Try {F.YELLOW}closing the file{S.R} before continuing.") input("\n Press Enter to Try Again...") else: print(f"{F.LIGHTRED_EX}\nERROR! Still cannot write to {F.LIGHTCYAN_EX}{configFileName}{F.LIGHTRED_EX}. {F.YELLOW}Try again?{S.R} (Y) or {F.YELLOW}Abandon Writing Config?{S.R} (N)") if choice("Choice:") == False: break except: traceback.print_exc() print(f"{B.RED}{F.WHITE}Error Code: F-3{S.R} Problem creating config file! The info above may help if it's a bug.") input("Press enter to Exit...") sys.exit() if os.path.exists(configFileName): parser = ConfigParser() try: parser.read("SpamPurgeConfig.ini", encoding="utf-8") if parser.get("info", "config_version"): if updating == False: print(f"\n{B.GREEN}{F.BLACK} SUCCESS! {S.R} {F.YELLOW}{configFileName}{S.R} file created successfully.") print(f"\nYou can now edit the file to your liking. You can also {F.YELLOW}create additional{S.R} configs using this same menu.\n") input("Press Enter to return to main menu...") return "MainMenu" else: return True else: print("Something might have gone wrong. Check if SpamPurgeConfig.ini file exists and has contents.") input("Press enter to Exit...") sys.exit() except: traceback.print_exc() print("Something went wrong when checking the created file. Check if SpamPurgeConfig.ini exists and has text. 
The info above may help if it's a bug.") input("Press enter to Exit...") sys.exit() # ------------------------------------------------------------------- def parse_comment_list(config, recovery=False, removal=False, returnFileName=False): if recovery == True: actionVerb = "recover" actionNoun = "recovery" elif removal == True: actionVerb = "remove" actionNoun = "removal" validFile = False manuallyEnter = False while validFile == False and manuallyEnter == False: print("--------------------------------------------------------------------------------") print(f"\nEnter the {F.YELLOW}name of the log file{S.R} with the comments to {actionVerb} (you can rename it to something easier like \'log.rtf\')") print(f" > {F.BLACK}{B.LIGHTGREEN_EX} TIP: {S.R} You can just drag the file into this window instead of typing it") print(F"{F.YELLOW}Or:{S.R} Just hit Enter to manually paste in the list of IDs next)") listFileName = input("\nLog File Name (Example: \"log.rtf\" or \"log\"): ") if str(listFileName).lower() == "x": return "MainMenu", None if len(listFileName) > 0: if os.path.exists(listFileName): pass elif os.path.exists(listFileName+".rtf"): listFileName = listFileName + ".rtf" elif os.path.exists(listFileName+".txt"): listFileName = listFileName + ".txt" else: # Try in the log folder listFileName = os.path.join(config['log_path'], listFileName) if os.path.exists(listFileName): pass elif os.path.exists(listFileName+".rtf"): listFileName = listFileName + ".rtf" elif os.path.exists(listFileName+".txt"): listFileName = listFileName + ".txt" # Get file path if os.path.exists(listFileName): try: with open(listFileName, 'r', encoding="utf-8") as listFile: data = listFile.read() listFile.close() validFile = True except: print(f"{F.RED}Error Code F-5:{S.R} Log File was found but there was a problem reading it.") else: print(f"\n{F.LIGHTRED_EX}Error: File not found.{S.R} Make sure it is in the same folder as the program.\n") print(f"Enter '{F.YELLOW}Y{S.R}' to try again, or '{F.YELLOW}N{S.R}' to manually paste in the comment IDs.") userChoice = choice("Try entering file name again?") if userChoice == True: pass elif userChoice == False: manuallyEnter = True elif userChoice == None: return "MainMenu", None else: manuallyEnter = True if manuallyEnter == True: print("\n\n--- Manual Comment ID Entry Instructions ---") print(f"1. {F.YELLOW}Open the log file{S.R} and look for where it shows the list of {F.YELLOW}\"IDs of Matched Comments\".{S.R}") print(f"2. {F.YELLOW}Copy that list{S.R}, and {F.YELLOW}paste it below{S.R} (In windows console try pasting by right clicking).") print("3. If not using a log file, instead enter the ID list in this format: FirstID, SecondID, ThirdID, ... 
\n") data = str(input("Paste the list here, then hit Enter: ")) if str(data).lower() == "x": return "MainMenu", None print("\n") # Parse data into list if manuallyEnter == False and '[' in data and ']' in data: matchBetweenBrackets = '(?<=\[)(.*?)(?=\])' # Matches text between first set of two square brackets #matchIncludeBracktes = '\[(.*?)\]' # Matches between square brackets, including brackets resultList = str(re.search(matchBetweenBrackets, data).group(0)) else: resultList = data resultList = resultList.replace("\'", "") resultList = resultList.replace("[", "") resultList = resultList.replace("]", "") resultList = resultList.replace(" ", "") resultList = resultList.split(",") if len(resultList) == 0: print(f"\n{F.RED}Error Code R-1:{S.R} No comment IDs detected, try entering them manually and make sure they are formatted correctly.") input("\nPress Enter to return to main menu...") return "MainMenu", None # Check for valid comment IDs validCount = 0 notValidCount = 0 notValidList = [] for id in resultList: if id[0:2] == "Ug": validCount += 1 else: notValidCount += 1 notValidList.append(id) if notValidCount > 0: print(f"{F.YELLOW}Possibly Invalid Comment IDs:{S.R} " + str(notValidList)+ "\n") if notValidCount == 0: print(f"\n{F.GREEN}Loaded all {str(validCount)} comment IDs successfully!{S.R}") input(f"\nPress Enter to begin {actionNoun}... ") elif validCount > 0 and notValidCount > 0: print(f"{F.RED}Warning!{S.R} {str(validCount)} valid comment IDs loaded successfully, but {str(notValidCount)} may be invalid. See them above.") input(f"\nPress Enter to try {actionNoun} anyway...\n") elif validCount == 0 and notValidCount > 0: print(f"\n{F.RED}Warning!{S.R} All loaded comment IDs appear to be invalid. See them above.") input(f"Press Enter to try {actionNoun} anyway...\n") if returnFileName == False: return resultList, None else: return resultList, pathlib.Path(os.path.relpath(listFileName)).stem ######################################### Read & Write Dict to Pickle File ######################################### def write_dict_pickle_file(dictToWrite, fileName, relativeFolderPath=RESOURCES_FOLDER_NAME, forceOverwrite=False): fileNameWithPath = os.path.join(relativeFolderPath, fileName) success = False while success == False: if os.path.isdir(relativeFolderPath): success = True else: try: os.mkdir(relativeFolderPath) success = True except: print(f"Error: Could not create folder. Try creating the folder {relativeFolderPath} to continue.") input("Press Enter to try again...") if os.path.exists(fileNameWithPath): if forceOverwrite == False: print(f"\n File '{fileName}' already exists! Either overwrite, or you'll need to enter a new name.") if choice("Overwrite File?") == True: pass else: confirm = False while confirm == False: newFileName = input("\nEnter a new file name, NOT including the extension: ") + ".save" print("\nNew file name: " + newFileName) confirm = choice("Is this correct?") fileNameWithPath = os.path.join(relativeFolderPath, newFileName) success = False while success == False: try: with open(fileNameWithPath, 'wb') as pickleFile: pickle.dump(dictToWrite, pickleFile) #json.dump(dictToWrite, jsonFile, indent=4) pickleFile.close() success = True except: traceback.print_exc() print("--------------------------------------------------------------------------------") print("Something went wrong when writing your pickle file. 
Did you open it or something?") input(f"\nPress Enter to try loading file again: {fileNameWithPath}") return True def read_dict_pickle_file(fileNameNoPath, relativeFolderPath=RESOURCES_FOLDER_NAME): failedAttemptCount = 0 fileNameWithPath = os.path.join(relativeFolderPath, fileNameNoPath) while True and not failedAttemptCount > 2: if os.path.exists(fileNameWithPath): failedAttemptCount = 0 while True and not failedAttemptCount > 2: try: with open(fileNameWithPath, 'rb') as pickleFile: #dictToRead = json.load(jsonFile) dictToRead = pickle.load(pickleFile) pickleFile.close() return dictToRead except: traceback.print_exc() print("--------------------------------------------------------------------------------") print("Something went wrong when reading your pickle file. Is it in use? Try closing it.") input(f"\nPress Enter to try loading file again: {fileNameWithPath}") failedAttemptCount += 1 return False else: print(f"\nFile '{fileNameNoPath}' not found! Try entering the name manually.") input(f"\nPress Enter to try loading file again: {fileNameWithPath}") failedAttemptCount += 1 return False def try_remove_file(fileNameWithPath): attempts = 1 while attempts < 3: try: os.remove(fileNameWithPath) return True except: print(f"\n{F.RED}\nERROR:{S.R} Could not remove file: '{fileNameWithPath}'. Is it open? If so, try closing it.") input("\nPress Enter to try again...") attempts += 1 print(f"\n{F.RED}\nERROR:{S.R} The File '{fileNameWithPath}' still could not be removed. You may have to delete it yourself.") input("\nPress Enter to continue...") return False def check_existing_save(): relativeSaveDir = os.path.join(RESOURCES_FOLDER_NAME, "Removal_List_Progress") savesList = list() if os.path.isdir(relativeSaveDir): fileList = list() for (_, _, filenames) in os.walk(relativeSaveDir): fileList.extend(filenames) if len(fileList) > 0: for fileName in fileList: if fileName[-5:] == ".save": savesList.extend([fileName]) return savesList
"""Doc""" import nltk from nltk.corpus import state_union from nltk.tokenize import PunktSentenceTokenizer def main(): """Doc""" train_text = state_union.raw("2005-GWBush.txt") sample_text = state_union.raw("2006-GWBush.txt") # train the Punkt tokenizer custom_sent_tokenizer = PunktSentenceTokenizer(train_text) # Then we can actually tokenize tokenized = custom_sent_tokenizer.tokenize(sample_text) process_content(tokenized) def process_content(sentences): """Docstring""" try: for i in sentences[:5]: words = nltk.word_tokenize(i) tagged = nltk.pos_tag(words) print(tagged) except Exception as exc: print(str(exc)) if __name__ == '__main__': main()
import security as S
import json
import os


class DB:
    __PASSWORD__ = bytearray([
        59, 59, 59, 59, 59, 59, 59, 59,
        59, 59, 59, 59, 59, 59, 59, 59,
        59, 59, 59, 59, 59, 59, 59, 59,
        59, 59, 59, 59, 59, 59, 59, 59
    ])
    __SALT__ = bytearray([
        32, 32, 32, 32, 32, 32, 32, 32,
        5, 5, 5, 5, 5, 5, 5, 5,
        5, 5, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 33, 33, 33
    ])
    __SHORT_SALT__ = bytearray([
        32, 32, 32, 32, 32, 32, 32, 32,
        5, 5, 5, 5, 5, 5, 5, 5
    ])

    def __init__(self, name: str):
        self.__name__ = name
        if not os.path.exists(f'./{name}.db'):
            # Create an empty encrypted database file on first use
            open(f'./{name}.db', 'wb').write(
                S.EncryptAes256(bytearray(), DB.__PASSWORD__, DB.__SHORT_SALT__))
            self.__table__ = {}
        else:
            self.__table__ = json.loads(
                S.DecryptFileAes256(f'./{name}.db', DB.__PASSWORD__,
                                    DB.__SHORT_SALT__, False).decode('utf-8'))

    def AddUser(self, login: str, secretKey: bytearray, passwordHash: bytearray,
                salt: bytearray, path: str, masterKey: bytearray):
        if login in self.__table__:
            raise Exception('exist')
        self.Set(login, [
            S.EncryptAes256(secretKey, masterKey, self.__SHORT_SALT__).hex(),
            S.EncryptAes256(passwordHash, masterKey, self.__SHORT_SALT__).hex(),
            S.EncryptAes256(salt, masterKey, self.__SHORT_SALT__).hex(),
            S.EncryptAes256(path.encode(), masterKey, self.__SHORT_SALT__).hex()
        ])

    def ChangeSecretKey(self, login: str, masterKey: bytearray, newSecretKey: bytearray):
        if login not in self.__table__:
            raise Exception("Doesn't exist")
        self.__table__[login][0] = S.EncryptAes256(newSecretKey, masterKey, self.__SHORT_SALT__).hex()

    def Exist(self, login: str):
        return login in self.__table__

    def GetUser(self, login: str, masterKey: bytearray):
        if login not in self.__table__:
            raise Exception("Doesn't exist")
        return [
            S.DecryptAes256(bytearray.fromhex(self.__table__[login][0]), masterKey, self.__SHORT_SALT__),
            S.DecryptAes256(bytearray.fromhex(self.__table__[login][1]), masterKey, self.__SHORT_SALT__),
            S.DecryptAes256(bytearray.fromhex(self.__table__[login][2]), masterKey, self.__SHORT_SALT__),
            S.DecryptAes256(bytearray.fromhex(self.__table__[login][3]), masterKey, self.__SHORT_SALT__).decode()
        ]

    def Get(self, key: str):
        if key not in self.__table__:
            raise Exception('error')
        return self.__table__[key]

    def Set(self, key: str, value):
        self.__table__[key] = value
        self.Save()

    def Remove(self, key: str):
        self.__table__.pop(key, None)
        self.Save()

    def Save(self):
        temp = json.dumps(self.__table__)
        S.EncryptToFile(f'./{self.__name__}.db', self.__PASSWORD__, self.__SHORT_SALT__, temp)
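# A minimal usage sketch for the DB class above. The login, key material, and
# path are illustrative placeholders only, and the local `security` module must
# provide the EncryptAes256/DecryptAes256 helpers referenced by DB; none of the
# values below come from the original file.
db = DB('users')
master_key = bytearray(32)  # placeholder 32-byte master key

if not db.Exist('alice'):
    db.AddUser('alice',
               secretKey=bytearray(32),      # placeholder secret key
               passwordHash=bytearray(32),   # placeholder password hash
               salt=bytearray(16),           # placeholder salt
               path='/vault/alice',          # hypothetical storage path
               masterKey=master_key)

# Returns the decrypted [secretKey, passwordHash, salt, path] list
print(db.GetUser('alice', master_key))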
# -*- coding: utf-8 -*-
from qgis.core import QgsApplication
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QDialog
from pathlib import Path
from os import rename

from ..utils import wait_cursor
from ..userInterface.create_profile_dialog import CreateProfileDialog
from ..userInterface.message_box_factory import MessageBoxFactory


class ProfileEditor(QDialog):

    def __init__(self, profile_manager_dialog, qgis_path, profile_manager, profile_handler, error_text, *args, **kwargs):
        super(ProfileEditor, self).__init__(*args, **kwargs)
        self.dlg = profile_manager_dialog
        self.profile_handler = profile_handler
        self.profile_manager = profile_manager
        self.message_box_factory = MessageBoxFactory(self.dlg)
        self.qgis_path = qgis_path
        self.error_text = error_text

    def edit_profile(self):
        """Renames profile with user input"""
        if self.dlg.list_profiles.currentItem() is None:
            self.message_box_factory.create_message_box(
                self.error_text, self.tr("Please choose a profile to rename first!"))
        elif self.dlg.list_profiles.currentItem().text() == Path(QgsApplication.qgisSettingsDirPath()).name:
            self.message_box_factory.create_message_box(
                self.error_text, self.tr("The active profile cannot be renamed!"))
        else:
            profile_before_change = self.profile_manager.adjust_to_operating_system(
                self.qgis_path + "/" + self.dlg.list_profiles.currentItem().text().replace(" - ", ""))
            dialog = CreateProfileDialog(self.dlg, self.profile_handler, True)
            dialog.exec_()

            while not self.profile_handler.is_cancel_button_clicked and not self.profile_handler.is_ok_button_clicked:
                QCoreApplication.processEvents()

            if self.profile_handler.is_ok_button_clicked:
                with wait_cursor():
                    if dialog.text_input.text() == "":
                        self.message_box_factory.create_message_box(
                            self.error_text, self.tr("Please enter a new profile name!"))
                    else:
                        profile_after_change = self.profile_manager.adjust_to_operating_system(
                            self.qgis_path + "/" + dialog.text_input.text())
                        try:
                            rename(profile_before_change, profile_after_change)
                            print("Source path renamed to destination path successfully.")
                        # If source is a file but destination is a directory
                        except IsADirectoryError:
                            print("Source is a file but destination is a directory.")
                        # If source is a directory but destination is a file
                        except NotADirectoryError:
                            print("Source is a directory but destination is a file.")
                        # For permission related errors
                        except PermissionError:
                            print("Operation not permitted.")
                        # For other errors
                        except OSError as error:
                            print(error)
                            self.message_box_factory.create_message_box(
                                self.error_text, self.tr("Profile Directory already exists!"))
def replace98and96(column):
    """Replace the sentinel values 96 and 98 in a column with the column's maximum value."""
    new = []
    newval = column.max()
    for i in column:
        if i == 96 or i == 98:
            new.append(newval)
        else:
            new.append(i)
    return new


def cleanHeaders(data):
    """Strip dashes from the column names of a DataFrame."""
    cleanCol = []
    for i in range(len(data.columns)):
        cleanCol.append(data.columns[i].replace('-', ''))
    data.columns = cleanCol
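# A minimal usage sketch for the two helpers above, assuming they are applied to
# a pandas DataFrame. The pandas import, column names, and sample data are
# illustrative placeholders, not part of the original file.
import pandas as pd

df = pd.DataFrame({'weight-kg': [70, 98, 110, 96], 'score-raw': [10, 20, 30, 40]})
cleanHeaders(df)                                    # column names become 'weightkg', 'scoreraw'
df['weightkg'] = replace98and96(df['weightkg'])     # 96/98 sentinels replaced with the column max (110)
print(df)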
""" Abstract Base Class for Handler """ from abc import ABCMeta class BaseHandler(metaclass=ABCMeta): """ Base event handler """ session = None subscribe_topic = None publish_topic = None def set_session(self, session): """ Setup session for publishing """ self.session = session
from django.shortcuts import render from django.views.decorators.csrf import csrf_exempt # Create your views here. from django.http import HttpResponse,JsonResponse,FileResponse from .snowid import IdWorker from .models import Plugin from .pluginHelper import PluginHelper from . import globalVal from .core import MainframeConfig import zipfile import os from urllib.parse import unquote import json from django.core import serializers def index(request): return render(request, 'index.html') def pluginIndex(request): return render(request, 'plugin.html') def pluginExample(request): return render(request, 'plugin_example.html') def pluginEditor(request): return render(request, 'plugin_editor.html') def docs(request): return render(request, 'Application micro service plugin framework deployment manual.docs',) def download(request): return render(request, 'PaaSPluginFwk.zip') def help(request): return render(request, 'help.html') @csrf_exempt def uploadPluginZipFile(request): if request.method == "POST": request.encoding = "utf-8" file = request.FILES.get("file", None) fileName = request.POST.get("filename", "",) fileName = unquote(fileName) # flen = request.POST.get("flen", "") if file is None: return JsonResponse({ 'status' : '1', 'message': '文件不存在' }) else: with open(globalVal.PLUGINS_DIR+"/"+fileName, 'wb+') as f: for chunk in file.chunks(): f.write(chunk) with zipfile.ZipFile(globalVal.PLUGINS_DIR+"/"+fileName) as zf: zf.extractall(path=globalVal.PLUGINS_DIR) if os.path.exists(globalVal.PLUGINS_DIR+"/"+fileName): # 压缩包文件也删除 os.unlink(globalVal.PLUGINS_DIR+"/"+fileName) record = Plugin.objects.filter(id=fileName[0:-4]) if not record.exists(): Plugin.objects.create(id=fileName[0:-4], name="新上传的插件:"+fileName[0:-4], developer="dev", pluginVersion="0.1", copyright="dev", pluginDescription="添加插件...", useCounting=0, likeCounting=0, keywords="插件", pluginType="general", logo="", defaultOptions="?", isFree=1, license="Apache License 2.0", pricing="0", banned="0", parent='None', currOwner='None', owner='None') helper = PluginHelper() helper.registerPlugin(fileName[0:-4]) # 注册插件 return JsonResponse({ 'status' : '0' }) else: return JsonResponse({ 'status' : '1' }) def get_zip_file(input_path, result): files = os.listdir(input_path) for file in files: if os.path.isdir(input_path + os.sep + file): get_zip_file(input_path + os.sep + file, result) result.append(input_path + os.sep + file) def zip_plugin(plugins_folder, plugin_folder): f = zipfile.ZipFile(plugins_folder + os.sep + plugin_folder+".zip", 'w', zipfile.ZIP_DEFLATED) filelists = [] get_zip_file(plugins_folder + os.sep + plugin_folder, filelists) for file in filelists: fpath = file.replace(plugins_folder, '') f.write(file, fpath) f.close() return plugins_folder + os.sep + plugin_folder+".zip" @csrf_exempt def downloadPluginZipFile(request): pluginId = request.POST.get('filename', '') #pluginId = pluginId[0:9] if os.path.isdir(globalVal.PLUGINS_DIR + os.sep + pluginId): zip_plugin(globalVal.PLUGINS_DIR, pluginId) fileName = pluginId+".zip" if os.path.isfile(globalVal.PLUGINS_DIR + os.sep + fileName): file = open(globalVal.PLUGINS_DIR + os.sep + fileName,'rb') response = FileResponse(file) response['Content-Type'] = 'application/octet-stream' response["Content-disposition"] = "attachment;filename={0}".format(fileName) # file.close() # os.remove(globalVal.PLUGINS_DIR+"/"+fileName) return response return JsonResponse({ 'status' : '1' }) # 下载失败 @csrf_exempt def createPlugin(request): pluginName = request.POST.get('pluginName', '') developer = 
request.POST.get('developer', '') version = request.POST.get('version', '') copyright = request.POST.get('copyright', '') description = request.POST.get('description', '') keywords = request.POST.get('keywords', '') logo = request.POST.get('logo', '') defaultOptions = request.POST.get('defaultOptions', '') pricing = request.POST.get('pricing', '') banned = request.POST.get('banned', '') worker = IdWorker(1, 2, 0) key = "SP_" + str(worker.get_id()) Plugin.objects.create(id=key, name=pluginName, developer=developer, pluginVersion=version, copyright=copyright, pluginDescription=description, useCounting=0, likeCounting=0, keywords=keywords, pluginType="general", logo=logo, defaultOptions=defaultOptions, isFree=1, license="Apache License 2.0", pricing=pricing, banned=banned, parent='None', currOwner='None', owner='None') helper = PluginHelper() helper.createPluginSpace(globalVal.PLUGINS_DIR, key) helper.registerPlugin(key) return JsonResponse({'status':0}) @csrf_exempt def savePlugin(request): id = request.POST.get('id', '') pluginName = request.POST.get('pluginName', '') developer = request.POST.get('developer', '') version = request.POST.get('version', '') copyright = request.POST.get('copyright', '') description = request.POST.get('description', '') keywords = request.POST.get('keywords', '') logo = request.POST.get('logo', '') defaultOptions = request.POST.get('defaultOptions', '') pricing = request.POST.get('pricing', '') banned = request.POST.get('banned', '') Plugin.objects.filter(id=id).update( name=pluginName, developer=developer, pluginVersion=version, copyright=copyright, pluginDescription=description, keywords=keywords, logo=logo, defaultOptions=defaultOptions, pricing=pricing, banned=banned) return JsonResponse({'status':0}) def getAllPlugins(request): #name = request.session.get('name') # 从session中获取name? 
allPlugins = Plugin.objects.all().order_by('-lastupdate') plugins = [] for i in allPlugins: json_dict = {} json_dict['id'] = i.id json_dict['name'] = i.name json_dict['developer'] = i.developer json_dict['pluginVersion'] = i.pluginVersion json_dict['copyright'] = i.copyright json_dict['pluginDescription'] = i.pluginDescription json_dict['useCounting'] = i.useCounting json_dict['likeCounting'] = i.likeCounting json_dict['keywords'] = i.keywords json_dict['logo'] = i.logo json_dict['defaultOptions'] = i.defaultOptions json_dict['isFree'] = i.isFree json_dict['pricing'] = i.pricing json_dict['banned'] = i.banned json_dict['createDatetime'] = str(i.createDatetime) json_dict['lastupdate'] = str(i.lastupdate) json_dict['license'] = i.license plugins.append(json_dict) return JsonResponse(plugins,safe=False) def getPlugin(request): pluginId = request.GET.get('pluginId', '') i = Plugin.objects.get(id=pluginId) json_dict = {} json_dict['id'] = i.id json_dict['name'] = i.name json_dict['developer'] = i.developer json_dict['pluginVersion'] = i.pluginVersion json_dict['copyright'] = i.copyright json_dict['pluginDescription'] = i.pluginDescription json_dict['useCounting'] = i.useCounting json_dict['likeCounting'] = i.likeCounting json_dict['keywords'] = i.keywords json_dict['logo'] = i.logo json_dict['defaultOptions'] = i.defaultOptions json_dict['isFree'] = i.isFree json_dict['pricing'] = i.pricing json_dict['banned'] = i.banned json_dict['createDatetime'] = str(i.createDatetime) json_dict['lastupdate'] = str(i.lastupdate) json_dict['license'] = i.license return JsonResponse(json_dict) @csrf_exempt def deletePlugin(request): id = request.POST.get('id', '') Plugin.objects.filter(id=id).delete() helper = PluginHelper() helper.removePluginSpace(globalVal.PLUGINS_DIR, id) helper.writeoffPlugin(id) return JsonResponse({'status':'0'}) def zip_file_path(plugins_folder, plugin_folder): f = zipfile.ZipFile(plugins_folder + os.sep + plugin_folder + ".zip", 'w', zipfile.ZIP_DEFLATED) filelists = [] get_zip_file(plugins_folder + os.sep + plugin_folder, filelists) for file in filelists: fpath = file.replace(plugins_folder, '') f.write(file, fpath) f.close() return plugins_folder + os.sep + plugin_folder + ".zip" # zip one file to zip package def zip_file(plugins_folder, plugin_folder, file_path, file_name): f = zipfile.ZipFile(plugins_folder + os.sep + plugin_folder + os.sep + file_name + ".zip", 'w', zipfile.ZIP_DEFLATED) f.write(plugins_folder + os.sep + file_path, file_path) f.close() return plugins_folder + os.sep + plugin_folder + os.sep + file_name + ".zip" @csrf_exempt def downloadZipFile(request): pluginId = request.POST.get('oid', '') path = request.POST.get('path', '') filename = request.POST.get('filename', '') zipfilepath = "" if os.path.isfile(globalVal.PLUGINS_DIR + os.sep + path): zipfilepath = zip_file(globalVal.PLUGINS_DIR, pluginId, path, filename) elif os.path.isdir(globalVal.PLUGINS_DIR + os.sep + path): zipfilepath = zip_file_path(globalVal.PLUGINS_DIR, path) if os.path.isfile(zipfilepath): file = open(zipfilepath, 'rb') response = FileResponse(file) response['Content-Type'] = 'application/octet-stream' response["Content-disposition"] = "attachment;filename={0}".format(filename+".zip") return response return JsonResponse({ 'status' : '1' }) @csrf_exempt def getPluginDirectory(request): helper = PluginHelper() pluginId = request.POST.get('pluginId', '') menuItems = helper.pluginDirectory(globalVal.PLUGINS_DIR, pluginId) return HttpResponse(json.dumps(menuItems, default=lambda o: o.__dict__, 
sort_keys=True, indent=4), content_type="application/json") @csrf_exempt def uploadFiles(request): request.encoding = "utf-8" file = request.FILES.get("file", None) fileName = request.POST.get("filename", "",) targetPath = request.POST.get("path", "",) fileName = unquote(fileName) targetPath = unquote(targetPath) if file is None: return JsonResponse({ 'status' : '1', 'message': '文件不存在' }) else: path = globalVal.PLUGINS_DIR + os.sep + targetPath if os.path.isfile(path): with open(os.path.dirname(path) + os.sep + fileName, 'wb+') as f: for chunk in file.chunks(): f.write(chunk) elif os.path.isdir(path): with open(path + os.sep + fileName, 'wb+') as f: for chunk in file.chunks(): f.write(chunk) return JsonResponse({ 'status' : '0' }) @csrf_exempt def removeFileFolder(request): path = request.POST.get('context', '') helper = PluginHelper() helper.removeFileFolder(path) return JsonResponse({ 'status' : '0' }) @csrf_exempt def newFileFolder(request): parentPath = request.POST.get('parentFolder', '') name = request.POST.get('name', '') type = request.POST.get('type', '') helper = PluginHelper() helper.newFolderFile(parentPath, type, name) return JsonResponse({ 'status' : '0' }) @csrf_exempt def renameFileFolder(request): oldFolderFileName = request.POST.get('oldFolderFileName', '') oldName = request.POST.get('oldName', '') parentFolder = oldFolderFileName.replace(oldName, '') newFolderFileName = request.POST.get('newFolderFileName', '') helper = PluginHelper() try: helper.renameFolderFile(oldFolderFileName, parentFolder, newFolderFileName) return JsonResponse({ 'status' : '0' }) except FileExistsError as e: return JsonResponse({ 'status' : '1' }) @csrf_exempt def getDistDirectories(request): dirname = request.POST.get('dirname', '') pluginId = request.POST.get('pluginId', '') helper = PluginHelper() srcDirs = helper.getAllDirectories(globalVal.PLUGINS_DIR, pluginId, dirname) return HttpResponse(json.dumps(srcDirs, default=lambda o: o.__dict__, sort_keys=True, indent=4), content_type="application/json") @csrf_exempt def copyMoveTo(request): srcFolder = request.POST.get('srcFolder', '') distFolder = request.POST.get('distFolder', '') operation = request.POST.get('operation', '') helper = PluginHelper() helper.copyMoveTo(globalVal.PLUGINS_DIR + os.sep + srcFolder, globalVal.PLUGINS_DIR + os.sep + distFolder, operation) return JsonResponse({ 'status' : '0' }) @csrf_exempt def openFile(request): filePath = request.POST.get('filePath', '') helper = PluginHelper() fileContent = helper.openFile(globalVal.PLUGINS_DIR + os.sep + filePath) return JsonResponse({ 'status' : '0', 'fileContent' : fileContent }) @csrf_exempt def saveFile(request): filePath = request.POST.get('filePath', '') fileContent = request.POST.get('fileContent', '') helper = PluginHelper() helper.saveFile(globalVal.PLUGINS_DIR + os.sep + filePath, fileContent); return JsonResponse({ 'status' : '0' }) def start(request): a = request.GET start = a.get('start') print(start) return JsonResponse({'status':'启动成功'}) def stop(request): a = request.GET stop = a.get('stop') print(stop) return JsonResponse({'status':'停止成功'})
'''
GenerateDummyData.py / main process
'''
import utils as ut

print('')
print('=========================')
print('    GenerateDummyData    ')
print('=========================')

try:
    # Get the list of setting files from the settings folder
    print('\nListing the JSON files stored in the settings folder.\n')
    setting_file_path_list = ut.get_setting_file_path_list()

    # Ask the user to pick a setting file from the displayed list and get its path
    setting_file_path = ut.request_select_file_path_list(setting_file_path_list)

    # Load the settings from the setting file and store each value in its own variable
    generate_rows_num, faker_language, seed_value, add_wqm, generate_data_dict = ut.import_json(setting_file_path)
    print('\nSuccessfully loaded the setting file. Showing its contents.')

    # Print the loaded generation settings
    ut.print_settings(setting_file_path, generate_rows_num, faker_language, seed_value, add_wqm, generate_data_dict)

    print('\nDummy data will be generated with the settings above.')
    print('Press Enter if everything looks correct.')
    print('\n[!] Generating a large number of rows may take a while to finish.')
    input('    Press Ctrl + C to abort while the process is running.')

    # Set the output file path for the CSV
    # Output goes to the export folder next to the Python script
    export_file_path = ut.make_export_file_path(ut.make_export_dir_path())

    # Main process that generates the dummy data
    # Show a progress message in case generation takes a long time
    print('\nGenerating dummy data...')
    raw = ut.generate_dummy_data_raw(generate_rows_num, faker_language, seed_value, generate_data_dict)

    # If the add_wqm setting is False, run the removal function for the " " characters
    if not add_wqm:
        raw = ut.remove_wqm(raw)

    print('Generated the dummy data.')

    # Print a preview of the generated result
    ut.print_relust(raw)

    # Write the result out as a CSV file
    with open(export_file_path, 'w', newline='', encoding='utf-8') as f:
        f.write(raw)

    print('Generated the dummy data.')
    print('The CSV file was written to the following path.')
    print(export_file_path)

except KeyboardInterrupt:
    print('\nProcess interrupted by keyboard input.')

print('\nExiting GenerateDummyData.')
from tests.case import DSLTestCase from xpath import dsl as x from xpath.renderer import to_xpath class TestInverse(DSLTestCase): __fixture__ = "simple.html" def test_inverts_the_expression(self): xpath = to_xpath(x.descendant("p")[x.attr("id").equals("fooDiv").inverse]) results = self.find_all(xpath) self.assertEqual(results[0].text, "Bax") def test_aliased_as_the_unary_tilde(self): xpath = to_xpath(x.descendant("p")[~x.attr("id").equals("fooDiv")]) results = self.find_all(xpath) self.assertEqual(results[0].text, "Bax")
import os from time import sleep import psutil import sh from quorumtoolbox.constellation import Constellation print('====== START CONSTELLATION TEST =======') cn = Constellation('company1_q2_n0', 'https://10.65.11.96', port=9000, other_nodes=['http://127.0.0.1:9000/', 'http://10.11.11.11:9000/']) print('Created all artifacts and keys need for constellation node.') print('====== PASS =======')
from django.conf.urls import patterns, url, include from django.contrib import admin from drf_tools.routers import NestedRouterWithExtendedRootView from .views import TestResourceViewSet, RelatedResource1ViewSet, RelatedResource2ViewSet admin.autodiscover() router = NestedRouterWithExtendedRootView(list()) test_resource_route = router.register(r'test-resources', TestResourceViewSet) test_resource_route.register(r'related-1', RelatedResource1ViewSet, ['resource']) test_resource_route.register(r'related-2', RelatedResource2ViewSet, ['resource']) urlpatterns = patterns( '', url(r'', include(router.urls)), )
# Generated by Django 2.2 on 2019-04-09 17:36 from django.db import migrations, models import django.db.models.deletion import localflavor.br.models import sagii.commons.validators class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='CondicaoLimitante', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tipo', models.IntegerField(choices=[(1, 'Deficiência'), (2, 'Transtorno')])), ('descricao', models.CharField(max_length=140)), ], ), migrations.CreateModel( name='DocumentoPessoalTipo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nome', models.CharField(max_length=20)), ], options={ 'verbose_name': 'Documento Pessoal Tipo', 'verbose_name_plural': 'Documentos Pessoais Tipos', }, ), migrations.CreateModel( name='Pessoa', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('nome_razao_social', models.CharField(max_length=255)), ], ), migrations.CreateModel( name='PessoaFisica', fields=[ ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Pessoa')), ('sexo', models.CharField(blank=True, choices=[('M', 'Masculino'), ('F', 'Feminino')], max_length=1, null=True)), ('estado_civil', models.IntegerField(blank=True, choices=[(1, 'Solteiro'), (2, 'Casado'), (3, 'Viúvo'), (4, 'Divorciado')], null=True)), ('tipo_sanguineo', models.CharField(blank=True, choices=[('A+', 'A+'), ('A-', 'A-'), ('B+', 'B+'), ('B-', 'B-'), ('AB+', 'AB+'), ('AB-', 'AB-'), ('O+', 'O+'), ('O-', 'O-')], max_length=3, null=True)), ('natural_cidade', models.CharField(blank=True, max_length=120, null=True)), ('natural_uf', localflavor.br.models.BRStateField(blank=True, max_length=2, null=True)), ('nacionalidade', models.CharField(blank=True, choices=[('BR', 'Brasileiro'), ('EST', 'Estrangeiro')], default='BR', max_length=3, null=True)), ('falecido', models.BooleanField(default=False)), ('cpf', localflavor.br.models.BRCPFField(max_length=14, unique=True)), ('condicoes_limitantes', models.ManyToManyField(to='base.CondicaoLimitante')), ], options={ 'verbose_name': 'Pessoa Física', 'verbose_name_plural': 'Pessoas Físicas', }, bases=('base.pessoa',), ), migrations.CreateModel( name='Endereco', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('tipo', models.IntegerField(choices=[(1, 'Comercial'), (2, 'Residencial'), (3, 'Rural')])), ('cep', localflavor.br.models.BRPostalCodeField(max_length=9)), ('logradouro', models.CharField(max_length=255)), ('complemento', models.CharField(blank=True, max_length=255, null=True)), ('bairro', models.CharField(max_length=120)), ('numero', models.CharField(max_length=120)), ('cidade', models.CharField(max_length=120)), ('uf', localflavor.br.models.BRStateField(max_length=2)), ('principal', models.BooleanField(default=False)), ('pessoa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='base_endereco_related', related_query_name='base_enderecos', to='base.Pessoa')), ], options={ 'verbose_name': 'Endereço', 'verbose_name_plural': 'Endereços', }, ), 
migrations.CreateModel( name='Telefone', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('tipo', models.IntegerField(choices=[(1, 'Tel. Fixo'), (2, 'Tel. Celular')])), ('numero', models.CharField(max_length=120, validators=[sagii.commons.validators.PhoneRegexValidator()])), ('observacoes', models.TextField(blank=True, null=True)), ('pessoa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='base_telefone_related', related_query_name='base_telefones', to='base.Pessoa')), ], options={ 'unique_together': {('numero', 'pessoa')}, }, ), migrations.CreateModel( name='RelacaoDependencia', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('grau_parentesco', models.IntegerField(choices=[(1, 'Pai/Mãe'), (2, 'Avô/Avó'), (3, 'Tio/Tia'), (4, 'Outro')], null=True)), ('grau_parentesco_outro', models.CharField(blank=True, max_length=60, null=True)), ('dependente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responsaveis', to='base.PessoaFisica')), ('responsavel', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='_deps', to='base.PessoaFisica')), ], ), migrations.CreateModel( name='PessoaJuridica', fields=[ ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Pessoa')), ('cnpj', localflavor.br.models.BRCNPJField(max_length=18, unique=True)), ('matriz', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='filiais', to='base.PessoaJuridica')), ], options={ 'verbose_name': 'Pessoa Jurídica', 'verbose_name_plural': 'Pessoas Jurídicas', }, bases=('base.pessoa',), ), migrations.AddField( model_name='pessoafisica', name='dependentes', field=models.ManyToManyField(through='base.RelacaoDependencia', to='base.PessoaFisica'), ), migrations.CreateModel( name='DocumentoPessoal', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('valor', models.CharField(max_length=60)), ('observacoes', models.CharField(blank=True, max_length=140, null=True)), ('pessoa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='base_documentopessoal_related', related_query_name='base_documentopessoals', to='base.Pessoa')), ('tipo', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='base.DocumentoPessoalTipo')), ], options={ 'verbose_name': 'Documento Pessoal', 'verbose_name_plural': 'Documentos Pessoais', 'unique_together': {('tipo', 'pessoa')}, }, ), migrations.CreateModel( name='ContatoSocial', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('tipo', models.CharField(choices=[('Whatsapp', 'Whatsapp'), ('Telegram', 'Telegram'), ('Facebook', 'Facebook'), ('Instagram', 'Instagram'), ('Twitter', 'Twitter'), ('Website', 'Website'), ('Email', 'Email'), ('Skype', 'Skype'), ('Linkedin', 'Linkedin')], max_length=60)), ('valor', models.CharField(max_length=60)), ('pessoa', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='base_contatosocial_related', related_query_name='base_contatosocials', to='base.Pessoa')), ], options={ 'verbose_name': 'Contato Social', 'verbose_name_plural': 'Contatos Sociais', 'unique_together': {('pessoa', 'tipo')}, }, ), ]
import urllib from tornado import escape from tornado import httpclient from tornado.options import options services = [] handlers = [] if hasattr(options, 'twitter_key') and hasattr(options, 'twitter_secret'): services.append('twitter') from june.social import twitter handlers.extend(twitter.handlers) def register(networks, content): for name in networks: if name not in services: return service = networks[name] if service['enabled'] != 'y': return http = httpclient.AsyncHTTPClient() url = 'http://127.0.0.1:%s/social/%s' % (options.port, name) post_args = {'token': service['token'], 'content': escape.utf8(content)} http.fetch(url, method="POST", body=urllib.urlencode(post_args), callback=None)
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script configures the logstash output to forward to elasticsearch # The environment variable elasticsearch_ip is expected to be set up import os with open("/etc/logstash/conf.d/elasticsearch.conf", 'w') as fh: fh.write(""" output { elasticsearch { action => index host => "%s" protocol => "http" } }""" % (os.environ['elasticsearch_ip']))
#!/usr/bin/env python '''This starts the python interpreter; captures the startup message; then gives the user interactive control over the session. Why? For fun... PEXPECT LICENSE This license is approved by the OSI and FSF as GPL-compatible. http://opensource.org/licenses/isc-license.txt Copyright (c) 2012, Noah Spurrier <[email protected]> PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ''' from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import pexpect # Don't do this unless you like being John Malkovich # c = pexpect.spawnu('/usr/bin/env python ./python.py') # Note that, for Python 3 compatibility reasons, we are using spawnu and # importing unicode_literals (above). spawnu accepts Unicode input and # unicode_literals makes all string literals in this script Unicode by default. c = pexpect.spawnu('/home/test/miniconda3/envs/Dev/bin/python') # c.expect('>>>') # print('And now for something completely different...') # print(''.join(reversed((c.before)))) # print('Yes, it\'s python, but it\'s backwards.') # print() # print('Escape character is \'^]\'.') # print(c.after, end=' ') c.interact() c.kill(1) print('is alive:', c.isalive())
import argparse import numpy as np from gym2048 import Gym2048 def parse_args(): parser = argparse.ArgumentParser(description='RL on 2048') parser.add_argument('--agent', help='agent type', choices=['random', 'pg', 'dqn'], default='random') parser.add_argument('--train', help='do training', action='store_true') parser.add_argument('--test', help='do testing', action='store_true') parser.add_argument('--model_path', help='path to model', default='model.h5') return parser.parse_args() if __name__ == '__main__': args = parse_args() gym = Gym2048() env = gym.make() if args.agent == 'random': from agents.core import AgentRandom agent = AgentRandom(env) elif args.agent == 'pg': from agents.core import AgentPG agent = AgentPG(env, args.model_path) else: from agents.core import AgentDQN agent = AgentDQN(env, args.model_path) if args.train: agent.train() if args.test: if args.agent != 'random': agent.load(args.model_path) num_episode = 100 rewards = [] scores = [] max_blocks = [] for e in range(num_episode): state = env.reset() done = False episode_reward = 0 while not done: action = agent.make_action(state) state, reward, done, info = env.step(action) # env.render() episode_reward += reward rewards.append(episode_reward) scores.append(env.game.score) max_blocks.append(env.game.grid.max()) print('Game #{} \tReward: {:.2f}\tScore: {}\tLargest block: {}'.format( e, episode_reward, scores[-1], max_blocks[-1])) print('Ran {} episodes.'.format(num_episode)) print('Avg reward: {}.'.format(np.mean(rewards))) print('Avg score: {}.'.format(np.mean(scores))) print('Largest block: {}.'.format(np.max(max_blocks)))
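# Hedged sketch (an assumption, not the project's agents.core implementation):
# the evaluation loop above only needs an agent exposing make_action(state),
# plus train()/load() for the learning agents. A minimal random baseline that
# satisfies that interface just samples from the gym action space.

class RandomBaseline:
    """Drop-in stand-in for the random-agent interface used above."""

    def __init__(self, env):
        self.env = env

    def make_action(self, state):
        # Ignore the observation and pick a uniformly random move.
        return self.env.action_space.sample()

    def train(self):
        # A random policy has nothing to learn.
        pass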
""" QuartzNet for ASR, implemented in Chainer. Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. """ __all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de', 'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru', 'quartznet15x5_ru34'] from .jasper import get_jasper def quartznet5x5_en_ls(classes=29, **kwargs): """ QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(classes=classes, version=("quartznet", "5x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet5x5_en_ls", **kwargs) def quartznet15x5_en(classes=29, **kwargs): """ QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_en", **kwargs) def quartznet15x5_en_nr(classes=29, **kwargs): """ QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 29 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_en_nr", **kwargs) def quartznet15x5_fr(classes=43, **kwargs): """ QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 43 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï', 'ü', 'ÿ'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_fr", **kwargs) def quartznet15x5_de(classes=32, **kwargs): """ QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 32 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_de", **kwargs) def quartznet15x5_it(classes=39, **kwargs): """ QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 39 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_it", **kwargs) def quartznet15x5_es(classes=36, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 36 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_es", **kwargs) def quartznet15x5_ca(classes=39, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 39 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ca", **kwargs) def quartznet15x5_pl(classes=34, **kwargs): """ QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 34 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń', 'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_pl", **kwargs) def quartznet15x5_ru(classes=35, **kwargs): """ QuartzNet 15x5 model for Russian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 35 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.chainer/models' Location for keeping the model parameters. """ vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ru", **kwargs) def quartznet15x5_ru34(classes=34, **kwargs): """ QuartzNet 15x5 model for Russian language (32 graphemes) from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261. Parameters: ---------- classes : int, default 34 Number of classification classes (number of graphemes). pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я'] return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary, model_name="quartznet15x5_ru34", **kwargs) def _test(): import numpy as np import chainer chainer.global_config.train = False pretrained = False from_audio = True audio_features = 64 models = [ quartznet5x5_en_ls, quartznet15x5_en, quartznet15x5_en_nr, quartznet15x5_fr, quartznet15x5_de, quartznet15x5_it, quartznet15x5_es, quartznet15x5_ca, quartznet15x5_pl, quartznet15x5_ru, quartznet15x5_ru34, ] for model in models: net = model( in_channels=audio_features, from_audio=from_audio, pretrained=pretrained) weight_count = net.count_params() print("m={}, {}".format(model.__name__, weight_count)) assert (model != quartznet5x5_en_ls or weight_count == 6713181) assert (model != quartznet15x5_en or weight_count == 18924381) assert (model != quartznet15x5_en_nr or weight_count == 18924381) assert (model != quartznet15x5_fr or weight_count == 18938731) assert (model != quartznet15x5_de or weight_count == 18927456) assert (model != quartznet15x5_it or weight_count == 18934631) assert (model != quartznet15x5_es or weight_count == 18931556) assert (model != quartznet15x5_ca or weight_count == 18934631) assert (model != quartznet15x5_pl or weight_count == 18929506) assert (model != quartznet15x5_ru or weight_count == 18930531) assert (model != quartznet15x5_ru34 or weight_count == 18929506) batch = 3 aud_scale = 640 if from_audio else 1 seq_len = np.random.randint(150, 250, batch) * aud_scale seq_len_max = seq_len.max() + 2 x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max) x = np.random.rand(*x_shape).astype(np.float32) x_len = seq_len.astype(np.long) y, y_len = net(x, x_len) assert (y.shape[:2] == (batch, net.classes)) if from_audio: assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9)) else: assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1]) if __name__ == "__main__": _test()
#!/usr/bin/env python # -*- coding: utf-8 -*- from .protocol import Protocol, ErrorResponse from .transport import ClientTransport from .serializer import Serializer class Client: """Client for making RPC calls to connected servers. :param protocol: An :py:class:`~tinyrpc.RPCProtocol` instance. :param transport: A :py:class:`~tinyrpc.transports.ClientTransport` instance. """ def __init__(self, protocol: Protocol, serializer: Serializer, transport: ClientTransport): self.__protocol = protocol self.__serializer = serializer self.__transport = transport self.__active = False @property def protocol(self): return self.__protocol @property def serializer(self): return self.__serializer @property def transport(self): return self.__transport @property def active(self): return self.__active async def open(self): await self.transport.open() self.__active = True return self async def close(self): await self.transport.close() self.__active = False return self async def call(self, method, args, kwargs, one_way=False): """Calls the requested method and returns the result. If an error occured, an :py:class:`~tinyrpc.exc.RPCError` instance is raised. :param method: Name of the method to call. :param args: Arguments to pass to the method. :param kwargs: Keyword arguments to pass to the method. :param one_way: Whether or not a reply is desired. """ if not self.__active: raise RuntimeError('Client is closed') req = self.protocol.create_request(method, args, kwargs, one_way) req_data = self.serializer.serialize(req.to_data()) # sends ... reply = await self.transport.send_message(req_data, not one_way) if one_way: # ... and be done return # ... or process the reply rep_data = self.serializer.deserialize(reply) rep = self.protocol.parse_response(rep_data) if isinstance(rep, ErrorResponse): raise rep.to_exception() return rep.result def get_proxy(self, prefix='', one_way=False): """Convenience method for creating a proxy. :param prefix: Passed on to :py:class:`~tinyrpc.client.RPCProxy`. :param one_way: Passed on to :py:class:`~tinyrpc.client.RPCProxy`. :return: :py:class:`~tinyrpc.client.RPCProxy` instance.""" return Proxy(self, prefix, one_way)
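# Hedged sketch only: Client.get_proxy above returns a `Proxy` object that is
# neither imported nor defined in that module (the docstring refers to it as
# RPCProxy). A minimal stand-in consistent with Client.call's signature could
# look like the class below; the attribute names and behaviour are assumptions,
# not the project's actual implementation.

class Proxy:
    """Turns attribute access into RPC calls on a Client."""

    def __init__(self, client, prefix='', one_way=False):
        self.client = client
        self.prefix = prefix
        self.one_way = one_way

    def __getattr__(self, name):
        # Build an awaitable that forwards to Client.call with the prefixed
        # method name, e.g. proxy.ping() -> client.call('ping', ...).
        method_name = self.prefix + name

        async def rpc_call(*args, **kwargs):
            return await self.client.call(method_name, args, kwargs,
                                          one_way=self.one_way)

        return rpc_call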
#!/usr/bin/env python """ Copyright (c) 2012, Bruce A. Corliss All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the BACETech Consulting nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Bruce A. Corliss BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import wx import os import shutil import socket import sys import time import threading tcp_socket = None recieve_timer = None CMD_DELIM_DEFAULT = ';' MSG_DELIM_DEFAULT = ';;' APP_W = 700 APP_H = 700 PAD=10 BUFFER_SIZE = 1024 class TextFrame(wx.Frame): def __init__(self): wx.Frame.__init__(self, None, -1, 'Zen Controller Debug Client', size=(APP_W , APP_H)) # Add panel self.panel = wx.Panel(self, wx.ID_ANY) # TCP Connection Objects self.hostAddress_text = wx.StaticText(self.panel, -1, "IP Address", pos=(150,10)) self.hostAddress_edit = wx.TextCtrl(self.panel, -1, "127.0.0.1", pos=(210, 10), size=(75, 15)) self.hostPort_text = wx.StaticText(self.panel, -1, "Port", pos=(150, 25), size=(20,20)) self.hostPort_edit = wx.TextCtrl(self.panel, -1, "22500", pos=(210, 25), size=(75, 15)) self.connect_toggle = wx.ToggleButton(self.panel, -1, "Connect", pos=(10, 8), size=(100,35)) self.cmdDelim_edit = wx.StaticText(self.panel, -1, "Cmd Delim", pos=(330, 10)) self.cmdDelim_edit = wx.TextCtrl(self.panel, -1, CMD_DELIM_DEFAULT, pos=(390, 10), size=(25, 15)) self.msgDelim_edit = wx.StaticText(self.panel, -1, "Msg Delim", pos=(330, 25)) self.msgDelim_edit = wx.TextCtrl(self.panel, -1, MSG_DELIM_DEFAULT, pos=(390, 25), size=(25, 15)) # Command input self.command_text = wx.StaticText(self.panel, -1, "Enter Commands (one per line, no Cmd or Msg Delims).", pos=(10,50)) self.command_edit = wx.TextCtrl(self.panel, -1,"",size=(APP_W - 3*PAD, 200), style=wx.TE_MULTILINE, pos=(PAD,70)) self.command_edit.SetInsertionPoint(0) self.com_button = wx.Button(self.panel, -1, "Send all Commands", pos=(10, 275), size=(100,20)) self.clearText_checkbox = wx.CheckBox(self.panel, -1, 'Clear Text on Send', (APP_W-150, 280)) self.loopCmd_checkbox = wx.CheckBox(self.panel, -1, 'Loop Send', (120, 280)) self.clearText_checkbox.SetValue(False) # Server response self.output_text = wx.StaticText(self.panel, -1, "Output", pos=(PAD,305)) self.output_edit = wx.TextCtrl(self.panel, -1,'',size=(APP_W - 3*PAD, 325), style=wx.TE_MULTILINE, pos=(PAD,320)) 
self.output_edit.SetEditable(False) # Callbacks self.command_edit.Bind(wx.EVT_CHAR, self.onCharEvent) self.com_button.Bind(wx.EVT_BUTTON, self.SendCommand, self.com_button) self.connect_toggle.Bind(wx.EVT_TOGGLEBUTTON, self.ConnectCallback, self.connect_toggle) def ConnectCallback(self, event): """ Initialize TCP connection and then start a new thread to listen for messages.""" if self.connect_toggle.GetValue(): self.LogThis("Attempting to connect...") self.connect_toggle.SetLabel("Disconnect") self.TcpConnect() else: self.LogThis("Disconnecting...") self.connect_toggle.SetLabel("Connect") if not tcp_socket and tcp_socket.isalive(): # Closing tcp connection tcp_socket.close() def TcpConnect(self): global tcp_socket host = self.hostAddress_edit.GetLabel() port = self.hostPort_edit.GetLabel() # Connect to server tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.LogThis("Connecting to: " + host + ": " + port) tcp_socket.connect((host,int(port))) def ContinuousRecieveData(self, event): """ Wait for server to send data, and print to comamnd line.""" global tcp_socket print "Waiting for server response" while True: buffer_str = ""; while True: if not self.connect_toggle.GetValue(): return wx.Yield() buffer_str += tcp_socket.recv(BUFFER_SIZE) print "Server response recieved" if buffer_str.find(MSG_DELIM) >= 0: print msg_str + "EOM" time.sleep(.5) def onCharEvent(self, event): keycode = event.GetKeyCode() controlDown = event.CmdDown() altDown = event.AltDown() shiftDown = event.ShiftDown() # Communication delimiters CMD_DELIM = self.cmdDelim_edit.GetValue() MSG_DELIM = self.msgDelim_edit.GetValue() if controlDown and keycode == wx.WXK_SPACE: # Send current line or selection to command window (ind1, ind2) = self.command_edit.GetSelection() if ind1 == ind2: curPos = self.command_edit.GetInsertionPoint() lineNum = len(self.command_edit.GetRange( 0, self.command_edit.GetInsertionPoint() ).split("\n"))-1 msg_out = self.command_edit.GetLineText(lineNum) self.LogThis(msg_out + "\n") tcp_socket.sendall(msg_out + MSG_DELIM) # Recieve data until "DONE;;" is recieved self.RecieveServerResponse() else: lineNum1 = len(self.command_edit.GetRange( 0, ind1).split("\n"))-1 lineNum2 = len(self.command_edit.GetRange( 0, ind2).split("\n")) msg_out = [self.command_edit.GetLineText(x) for x in range(lineNum1, lineNum2)] self.LogThis("Client:\t" + CMD_DELIM.join(msg_out) + CMD_DELIM + MSG_DELIM) tcp_socket.sendall(CMD_DELIM.join(msg_out) + MSG_DELIM) # Recieve data until Msg Delim self.RecieveServerResponse() else: event.Skip() def LogThis(self, output_str): print output_str self.output_edit.AppendText("\n" + output_str) self.output_edit.ShowPosition(self.output_edit.GetLastPosition()) self.output_edit.Refresh() def RecieveServerResponse(self): global tcp_socket MSG_DELIM = self.msgDelim_edit.GetValue() CMD_DELIM = self.cmdDelim_edit.GetValue() buffer_str = '' while True: if not self.connect_toggle.GetValue(): return wx.Yield() buffer_str += tcp_socket.recv(1024) if buffer_str.find("DONE" + MSG_DELIM) >= 0: self.LogThis("Server:\t" + (MSG_DELIM + "\nServer:\t").join(buffer_str.split(MSG_DELIM)[:-1]) + MSG_DELIM) break def OnButtonClick(self, event): self.panel.Refresh() def SendCommand(self, event): global tcp_socket MSG_DELIM = self.msgDelim_edit.GetValue() CMD_DELIM = self.cmdDelim_edit.GetValue() while True: wx.Yield() msg_out = self.command_edit.GetValue() self.LogThis("Client:\t" + (CMD_DELIM+' ').join(msg_out.split('\n')) + MSG_DELIM) tcp_socket.sendall(CMD_DELIM.join(msg_out.split('\n')) + 
MSG_DELIM) # Recieve data until Msg Delim self.RecieveServerResponse() if not self.loopCmd_checkbox.GetValue() or not self.connect_toggle.GetValue(): break time.sleep(1) def main(): app = wx.PySimpleApp() frame = TextFrame() frame.Show() app.MainLoop() if __name__ == "__main__": main()
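# Hedged companion sketch (not part of the original tool, written for Python 3
# while the GUI above targets Python 2): a tiny stand-in server for exercising
# the debug client. It assumes the same framing the client uses -- commands
# separated by ';', messages terminated by ';;' -- echoes every command back,
# and ends each reply with "DONE;;" so RecieveServerResponse() stops reading.
# Host and port match the client's defaults.

import socket

CMD_DELIM = ';'
MSG_DELIM = ';;'


def run_debug_server(host='127.0.0.1', port=22500):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    print('Listening on %s:%d' % (host, port))
    conn, _addr = srv.accept()
    buffer_str = ''
    try:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            buffer_str += data.decode('utf-8')
            # Handle every complete message currently in the buffer.
            while MSG_DELIM in buffer_str:
                msg, buffer_str = buffer_str.split(MSG_DELIM, 1)
                for cmd in filter(None, msg.split(CMD_DELIM)):
                    conn.sendall(('echo: %s%s' % (cmd, MSG_DELIM)).encode('utf-8'))
                conn.sendall(('DONE' + MSG_DELIM).encode('utf-8'))
    finally:
        conn.close()
        srv.close()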
#!/usr/bin/env python from distutils.core import setup config = {} setup(**config)
"""Main pytorch lightning module""" from argparse import ArgumentParser from models.multi_channel_conv_nets import P2PModel, binary_clf_model import os from pathlib import Path import pytorch_lightning as pl from sklearn.model_selection import train_test_split import torch from torch import nn from utils.torchutils import set_module_trainable class BaseModule(pl.LightningModule): """Base module, to be inhereted by specific modules.""" def __init__(self, hparams): super().__init__() torch.backends.cudnn.benchmark = True self.hparams = hparams if hasattr(hparams, "ch_out"): ch_out = hparams.ch_out else: ch_out = hparams.channels self.model = P2PModel(model_name=hparams.arch, ch_in=hparams.channels, ch_out=ch_out) set_module_trainable(self.model, False) if hparams.arch == "unet": set_module_trainable(self.model.model.out, True) else: set_module_trainable(self.model.model.classifier[-1], True) self.unfrozen = False if hasattr(self.hparams, "input_weights") and hparams.use_pretrained: try: self.load_model(self.save_dir.parents[0] / self.input_weights) except: print("pretrained weights not found") self.data_root = Path(hparams.data_root) all_fnames = os.listdir(self.data_root / "x") self.train_fnames, self.val_fnames = train_test_split(all_fnames, test_size=hparams.val_size) self.inspection_batch = None if hasattr(hparams, "adv_model"): adv_model = hparams.adv_model self.adv_model = binary_clf_model(adv_model, ch_out) set_module_trainable(self.adv_model, False) set_module_trainable(self.adv_model.fc, True) self.adv_loss_fn = nn.BCEWithLogitsLoss() if not hasattr(hparams, "w_adv"): print("No value for adversarial loss weight given. Using 0.5") self.w_adv = 0.5 else: self.w_adv = hparams.w_adv else: self.adv_model = None self.w_adv = 0 def forward(self, x): return self.model(x) def unfreeze_layers(self): """Make sure to call in training_step""" if not self.unfrozen and self.current_epoch > self.hparams.epochs_til_unfreezing: self.unfrozen = True set_module_trainable(self.model, True) if self.adv_model is not None: set_module_trainable(self.adv_model, True) @staticmethod def get_optims(params, lr): opt = torch.optim.Adam(params, lr=lr) sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10) return opt, sch def configure_optimizers(self): """If there is not advisarial model return one optimiser and one scheduler. If we have an adversarial model return an optimiser and scheduler for both the model and discriminator. Currently only adam and cosine annealing with the same learning rate for both models are implemented. """ optimizer, scheduler = self.get_optims(self.model.parameters(), self.hparams.learning_rate) if self.adv_model is None: return [optimizer], [scheduler] else: optimizer_disc, scheduler_disc = self.get_optims(self.adv_model.parameters(), self.hparams.learning_rate) return [optimizer, optimizer_disc], [scheduler, scheduler_disc] def save_model(self, path): """Save a state dict only (Not including pytorch lightning specific data)""" torch.save(self.model.state_dict(), path) def load_model(self, path): """Load weights from a state dict. Ignore weights with sizes that do not match. 
This is used for loading weights from pretraining.""" pretrained_dict = torch.load(path) model_dict = self.model.state_dict() pretrained_dict = {k:v for k, v in pretrained_dict.items() if (k in model_dict) and (v.size() == model_dict[k].size())} model_dict.update(pretrained_dict) self.model.load_state_dict(model_dict) @staticmethod def __add_base_args(parent_parser): """Make sure to call in add_model_specific_args""" parser = ArgumentParser(parents=[parent_parser]) parser.add_argument("--data_root", type=str) parser.add_argument('--batch_size', default=16, type=int) parser.add_argument("--learning_rate", default=0.001, type=float) parser.add_argument("--arch", default="deeplabv3", type=str) parser.add_argument("--loss", default="L2", type=str) parser.add_argument("--channels", default=3, type=int) parser.add_argument("--val_size", default=0.1, type=float) parser.add_argument("--steps_til_unfreezing", default=10000, type=int) parser.add_argument("--no_data_aug", action="store_true") return parser @staticmethod def add_model_specific_args(parent_parser, root_dir): """Method for adding arguments to an argparser for stand alone training""" raise NotImplementedError
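# Hedged sketch of a concrete subclass of BaseModule above. The dataset
# handling is omitted and the loss choice is an arbitrary example; only the
# hooks shown (training_step calling unfreeze_layers, and
# add_model_specific_args reusing the shared arguments) follow what the base
# class documents.

from torch.nn import functional as F


class ExampleP2PModule(BaseModule):
    """Minimal image-to-image module built on BaseModule."""

    def training_step(self, batch, batch_idx):
        self.unfreeze_layers()  # the base class asks for this call here
        x, y = batch
        y_hat = self(x)
        loss = F.mse_loss(y_hat, y)  # stand-in for whatever hparams.loss selects
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        return {"val_loss": F.mse_loss(self(x), y)}

    @staticmethod
    def add_model_specific_args(parent_parser, root_dir):
        # __add_base_args is name-mangled on BaseModule, so a subclass has to
        # reach it through the mangled attribute name.
        parser = BaseModule._BaseModule__add_base_args(parent_parser)
        # unfreeze_layers() reads this attribute from hparams.
        parser.add_argument("--epochs_til_unfreezing", default=5, type=int)
        return parser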
from fastapi import APIRouter, Depends
from pymongo.database import Database

from dependencies import get_m_db
from mongo import operations
from responses import ApiResponse

router = APIRouter(prefix="/mg/movies", tags=["movies"])


@router.get("", response_model_exclude_unset=True)
def get_movies(db: Database = Depends(get_m_db), page: int = 1, size: int = 20):
    movies = operations.get_movies(db, page, size)
    return ApiResponse.respond(movies)


@router.get("/{id}")
def get_movie(id: int):
    return {"success": True, "data": []}
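# Hedged sketch of what the operations.get_movies helper used above might look
# like: plain skip/limit pagination with pymongo. The collection name
# ("movies") and the projection are assumptions, not taken from this router.

from pymongo.database import Database


def get_movies(db: Database, page: int = 1, size: int = 20) -> list:
    """Return one page of movie documents, excluding Mongo's internal _id."""
    skip = (page - 1) * size
    cursor = db["movies"].find({}, {"_id": 0}).skip(skip).limit(size)
    return list(cursor)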
""" Persistent property of WX GUI applications that are specific for each computer, not global line "persistent_property" Example: A window that remembers its size. wx.app = wx.App(redirect=False) class Window(wx.Frame): size = setting("size",(400,250)) def __init__(self): wx.Frame.__init__(self,parent=None,size=self.size) self.Bind(wx.EVT_SIZE,self.OnResize) self.Layout() self.Show() def OnResize(self,event): event.Skip() self.size = tuple(self.Size) win = Window() wx.app.MainLoop() Author: Friedrich Schotte Date created: 2017-11-20 Date last modified: 2018-12-04 """ __version__ = "1.1" # name: accepting "TimingPanel.refresh_period" import wx from logging import debug,info,warn,error def setting(name,default_value=0.0): """A presistent property of a class""" def class_name(self): if "." in name: class_name = name.split(".")[0] else: class_name = getattr(self,"name",self.__class__.__name__) return class_name def my_name(): if "." in name: my_name = name.split(".")[1] else: my_name = name return my_name def get(self): from time import time if not hasattr(self,"config") or self.config.last_read < time()-1: self.config = wx.Config(class_name(self)) self.config.last_read = time() value = self.config.Read(my_name()) dtype = type(default_value) from numpy import nan,inf # for eval try: value = dtype(eval(value)) except: value = default_value return value def set(self,value): debug("%s.%s = %r" % (class_name(self),my_name(),value)) from time import time if not hasattr(self,"config"): self.config = wx.Config(class_name(self)) self.config.last_read = time() self.config.Write(my_name(),repr(value)) self.config.Flush() return property(get,set) if __name__ == "__main__": from pdb import pm # for debugging import logging # for debugging logging.basicConfig( level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s", ) import wx app = wx.App(redirect=False) ##config = wx.Config("TimingPanel") class Timing_Setup_Panel(object): refresh_period = setting("TimingPanel.refresh_period",1.0) TimingPanel = Timing_Setup_Panel() self = TimingPanel # for debugging ##print('config.Read("refresh_period")') ##print('config.Write("refresh_period","1.0"); config.Flush()') ##print('config.Write("refresh_period","2.0"); config.Flush()') print('TimingPanel.refresh_period') print('TimingPanel.refresh_period = 1.0') print('TimingPanel.refresh_period = 2.0')
# -*- coding: UTF-8 -*- from pp import initdb initdb.save_list()
print('NNNNN')
"""Alinment tool. An alignment tool takes an image of an object and aligns it to obtain a standardized version of the image, which may improve subsequent tasks like verification, recognition, or attribute estimation. """ # standard imports from typing import TypeVar, Generic, Optional # thirdparty imports import numpy as np # toolbox imports from ..base.implementation import Implementable from ..base.image import Imagelike, Image, ImageWarper from ..base.image import Landmarks, Size, Sizelike from .face.landmarks import Detector as LandmarksDetector LandmarksType = TypeVar('LandmarksType', bound=Landmarks) class LandmarkAligner(Generic[LandmarksType]): """A :py:class:`LandmarkAligner` aligns images based on specific landmarks. It employs a :py:class:`LandmarksDetector` to detect the landmarks in an image and then computes a transformation to move these landmarks to their reference points. The goal is to transform the original image in a way that the positions of the landmarks are moved to predefined standard positions. A :py:class:`LandmarkAligner` uses a specific landmarking scheme that specifies the number and meaning of the landmarks. The :py:class:`LandmarkDetector` defines standard positions for these landmarks. The :py:class:`LandmarkAligner` relies either on a compatible :py:class:`Landmarker` to obtain the landmarks for a given input image, or these landmarks have to be provided expilitly. """ _detector: LandmarksDetector[LandmarksType] = None _reference: LandmarksType = None _size: Size = None _image_warper: ImageWarper = None def __init__(self, detector: LandmarksDetector[LandmarksType], size: Optional[Sizelike] = None, warper: Optional[ImageWarper] = None, **kwargs) -> None: """ """ super().__init__(**kwargs) self._detector = detector self.size = size self._image_warper = ImageWarper() if warper is None else warper @property def size(self) -> Size: return self._size @size.setter def size(self, size: Sizelike) -> None: self._size = \ self._detector._reference_size if size is None else Size(size) self._reference = self._detector.reference(self._size) @property def reference(self) -> LandmarksType: return self._reference @property def detector(self) -> LandmarksDetector: return self._detector @property def warper(self) -> ImageWarper: return self._image_warper def compute_transformation(self, landmarks: LandmarksType) -> np.ndarray: """Compute an (affine) transformation to map the given landmarks to the reference landmarks of the :py:class:`LandmarkAligner`. """ transformation = \ self._image_warper.compute_transformation(landmarks.points, self._reference.points) return transformation def apply_transformation(self, image: Imagelike, transformation: np.ndarray) -> np.ndarray: """Apply a Transformation to an image. Arguments --------- image: The image to be transformed. transformation: The transformation to be applied """ aligned = self._image_warper.warp(image, transformation, self._size) return aligned def __call__(self, image: Imagelike, landmarks: Optional[LandmarksType] = None) -> np.ndarray: """Align an image by applying an (affine) transformation that maps source points to target points. Arguments --------- image: The image to align. landmarks: A list of points to be mapped onto the reference points, given as (x,y) coordinates. If `None`, then the detector will be used to obtain landmarks, and all detections will be aligned. Result ------ aligned: The aligned image (if landmarks were given) or a batch of aligned images. 
""" image = Image.as_array(image) if landmarks is None: detections = self._detector.detect_landmarks(image) if not detections: return None result = np.nparray((len(detections), self._size.height, self._size.width, image.shape[2])) for idx, landmarks in enumerate(detections): result[idx] = self(image, landmarks) return result else: transformation = self.compute_transformation(landmarks) return self.apply_transformation(image, transformation)
import pytest from ebl.corpus.domain.chapter import Stage from ebl.transliteration.domain.text_id import TextId from ebl.transliteration.domain.museum_number import MuseumNumber from ebl.lemmatization.domain.lemmatization import LemmatizationToken from ebl.transliteration.domain.atf import Atf, Status, Surface from ebl.transliteration.domain.genre import Genre from ebl.transliteration.domain.labels import SurfaceLabel from ebl.transliteration.domain.line_number import LineNumber from ebl.transliteration.domain.parallel_line import ( ChapterName, Labels, ParallelComposition, ParallelFragment, ParallelText, ) @pytest.mark.parametrize( "cf,duplicates,labels,display_value", [ ( True, True, Labels(surface=SurfaceLabel((Status.CORRECTION,), Surface.OBVERSE)), "cf. F K.1 &d o! 1", ), (False, False, Labels(), "F K.1 1"), ], ) def test_parallel_fragment(cf, duplicates, labels, display_value) -> None: museum_number = MuseumNumber.of("K.1") line_number = LineNumber(1) line = ParallelFragment(cf, museum_number, duplicates, labels, line_number) assert line.has_cf is cf assert line.museum_number == museum_number assert line.has_duplicates is duplicates assert line.labels == labels assert line.line_number == line_number assert line.display_value == display_value assert line.atf == Atf(f"// {display_value}") assert line.lemmatization == (LemmatizationToken(display_value),) @pytest.mark.parametrize( "cf,chapter,display_value", [ ( True, ChapterName(Stage.OLD_BABYLONIAN, "my version", "my name"), 'cf. L I.1 OB "my version" "my name" 1', ), ( False, ChapterName(Stage.OLD_BABYLONIAN, "", "my name"), 'L I.1 OB "my name" 1', ), (False, None, "L I.1 1"), ], ) def test_parallel_text(cf, chapter, display_value) -> None: text_id = TextId(Genre.LITERATURE, 1, 1) line_number = LineNumber(1) line = ParallelText(cf, text_id, chapter, line_number) assert line.has_cf is cf assert line.text == text_id assert line.chapter == chapter assert line.line_number == line_number assert line.display_value == display_value assert line.atf == Atf(f"// {display_value}") assert line.lemmatization == (LemmatizationToken(display_value),) @pytest.mark.parametrize( "cf,display_value", [(True, "cf. (my name 1)"), (False, "(my name 1)")] ) def test_parallel_composition(cf, display_value) -> None: name = "my name" line_number = LineNumber(1) line = ParallelComposition(cf, name, line_number) assert line.has_cf is cf assert line.name == name assert line.line_number == line_number assert line.display_value == display_value assert line.atf == Atf(f"// {display_value}") assert line.lemmatization == (LemmatizationToken(display_value),)
""" Escreva um programa que peça nome e ano de nascimento do usuário, calcule a idade e imprima o resultado no formato abaixo. '<nome do usuário> tem <idade> anos.' """ from datetime import datetime nome = input('Nome: ') dia = input('Dia de nascimento: ') mes = input('Mês de nascimento: ') ano = input('Ano de nascimento: ') data_nasc = datetime(int(ano), int(mes), int(dia)) hoje = datetime.today() idade = hoje.year - data_nasc.year if hoje.month < data_nasc.month: idade -= 1 elif hoje.month == data_nasc.month and hoje.day < data_nasc.day: idade -= 1 print(nome, 'tem', idade, 'anos.')
# Generated by Django 3.1.6 on 2021-03-30 13:40 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('profiles', '0002_userprofile_is_restricted'), ] operations = [ migrations.RenameField( model_name='userprofile', old_name='is_restricted', new_name='is_unrestricted', ), ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # s_weak_dominance [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_weak_dominance&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=doc-s_weak_dominance). # + import numpy as np import scipy as sp import matplotlib.pyplot as plt from arpym.statistics import simulate_normal from arpym.tools import add_logo # - # ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_weak_dominance-parameters) mu_ = np.array([1, 0]) # mean vector of jointly normal variables sigma2_ = np.array([[1, 0], [0, 1]]) # covariance matrix j_ = 5000 # number of simulations # ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_weak_dominance-implementation-step01): Calculate marginal cdfs and pdfs # get pdf and cdf of X_1 and X_2 llim = np.floor( min(mu_[0]-5*np.sqrt(sigma2_[0, 0]), mu_[1]-5*np.sqrt(sigma2_[1, 1])) ) ulim = np.ceil( max(mu_[0]+5*np.sqrt(sigma2_[0, 0]), mu_[1]+5*np.sqrt(sigma2_[1, 1])) ) x_grid = np.linspace(llim, ulim, 100) pdf_1 = sp.stats.norm.pdf(x_grid, mu_[0], np.sqrt(sigma2_[0, 0])) pdf_2 = sp.stats.norm.pdf(x_grid, mu_[1], np.sqrt(sigma2_[1, 1])) cdf_1 = sp.stats.norm.cdf(x_grid, mu_[0], np.sqrt(sigma2_[0, 0])) cdf_2 = sp.stats.norm.cdf(x_grid, mu_[1], np.sqrt(sigma2_[1, 1])) # ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_weak_dominance-implementation-step02): Simulate values from X_1 and apply cdfs # + # simulate scenarios from X_1 x = simulate_normal(mu_, sigma2_, j_) x_1 = x[:, 0] # apply marginal cdfs to the samples cdf1_x1 = sp.stats.norm.cdf(x_1, mu_[0], sigma2_[0, 0]) cdf2_x1 = sp.stats.norm.cdf(x_1, mu_[1], sigma2_[1, 1]) # - # ## Plots # + # set figure specifications plt.style.use('arpm') f, ax = plt.subplots(1, 2, figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0) # pdf comparison plt.sca(ax[0]) plt.plot(pdf_1, x_grid, lw=2, color='C0', label=r'$f_{X_{1}}(x)$') plt.plot(pdf_2, x_grid, lw=2, color='C3', label=r'$f_{X_{2}}(x)$') plt.xlabel('pdf', fontsize=17) plt.ylabel(r'$x$', fontsize=15, rotation='horizontal') plt.title('pdf comparison', fontsize=20, fontweight='bold') plt.legend(fontsize=17, borderpad=0.5, labelspacing=0.5) ax[0].spines['top'].set_visible(False) ax[0].spines['right'].set_visible(False) # cdf/quantile comparison plt.sca(ax[1]) plt.plot(cdf_1, x_grid, lw=2, color='C0', label=r'$F_{X_{1}}(x)$') plt.plot(cdf_2, x_grid, lw=2, color='C3', label=r'$F_{X_{2}}(x)$') plt.xlabel('cdf', fontsize=17) plt.title('cdf/quantile comparison', fontsize=20, fontweight='bold') plt.legend(fontsize=17, borderpad=0.5, labelspacing=0.5) ax[1].spines['top'].set_visible(False) ax[1].spines['right'].set_visible(False) add_logo(f, location=4, set_fig_size=False) plt.tight_layout() plt.show() plt.close(f) # weak dominance in terms of strong dominance # set figure specifications g = plt.figure(1, figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0) ax_scatter = plt.axes([0.225, 0.305, 0.65, 0.65]) ax_histx = plt.axes([0.225, 0.1, 0.65, 0.2]) ax_histy = plt.axes([0.1, 0.305, 0.12, 0.65]) # scatter plot of cdf1_x1 vs cdf2_x1 ax_scatter.scatter(cdf1_x1[:200], cdf2_x1[:200], marker='.', label=r'cdf transforms applied to sample $\{x_{1}^{(j)}\}_{j=1}^{\bar{j}}\sim X_{1}$') 
ax_scatter.plot(range(2), range(2), lw=2, color='black') ax_scatter.legend(loc='upper left', fontsize=17, borderpad=0.5) ax_scatter.set_xticklabels([]) ax_scatter.set_yticklabels([]) ax_scatter.spines['top'].set_visible(False) ax_scatter.spines['right'].set_visible(False) # histogram of cdf1_x1 ax_histx.hist(cdf1_x1, bins=50, density=True, color='lightgray') ax_histx.set_xlabel(r'$F_{X_{1}}(X_{1}) \sim U[0,1]$', fontsize=17) ax_histx.tick_params(axis='x', which='major', labelsize=14) ax_histx.set_yticklabels([]) # histogram of cdf2_x1 ax_histy.hist(cdf2_x1, bins=50, density=True, color='lightgray', orientation='horizontal') ax_histy.set_ylabel(r'$F_{X_{2}}(X_{1}) \nsim U[0,1]$', fontsize=17) ax_histy.set_xticklabels([]) ax_histy.tick_params(axis='y', which='major', labelsize=14) add_logo(g, axis=ax_scatter, location=4, set_fig_size=False)
# __init__.py from .array_transformation_with_input_bounds import scale_array_with_input_bounds, normalise_array_with_input_bounds, \ transform_array_with_input_bounds from .dataset_handling import load_dataset, concatenate_datasets, save_dataset from .region_classification import return_index_if_value_in_region, convert_resampling_region_to_bounds, \ compute_region_classification_len, compute_regions_belongings from .retrieve_data_from_wandb import rebuild_trained_model_from_cloud from .type_check import ensure_numpy_array, ensure_tensor_array from .verfication_helpers import disregard_points_within_ball, get_radius_from_reference_point_statistical
import tensorflow as tf from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.eager import context def cyclic_learning_rate(global_step, learning_rate=0.01, max_lr=0.1, step_size=20., gamma=0.99994, mode='triangular', name=None): """Applies cyclic learning rate (CLR). From the paper: Smith, Leslie N. "Cyclical learning rates for training neural networks." 2017. [https://arxiv.org/pdf/1506.01186.pdf] This method lets the learning rate cyclically vary between reasonable boundary values achieving improved classification accuracy and often in fewer iterations. This code varies the learning rate linearly between the minimum (learning_rate) and the maximum (max_lr). It returns the cyclic learning rate. It is computed as: ```python cycle = floor( 1 + global_step / ( 2 * step_size ) ) x = abs( global_step / step_size – 2 * cycle + 1 ) clr = learning_rate + ( max_lr – learning_rate ) * max( 0 , 1 - x ) ``` Polices: 'triangular': Default, linearly increasing then linearly decreasing the learning rate at each cycle. 'triangular2': The same as the triangular policy except the learning rate difference is cut in half at the end of each cycle. This means the learning rate difference drops after each cycle. 'exp_range': The learning rate varies between the minimum and maximum boundaries and each boundary value declines by an exponential factor of: gamma^global_step. Example: 'triangular2' mode cyclic learning rate. '''python ... global_step = tf.Variable(0, trainable=False) optimizer = tf.train.AdamOptimizer(learning_rate= clr.cyclic_learning_rate(global_step=global_step, mode='triangular2')) train_op = optimizer.minimize(loss_op, global_step=global_step) ... with tf.Session() as sess: sess.run(init) for step in range(1, num_steps+1): assign_op = global_step.assign(step) sess.run(assign_op) ... ''' Args: global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the cyclic computation. Must not be negative. learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate which is the lower bound of the cycle (default = 0.1). max_lr: A scalar. The maximum learning rate boundary. step_size: A scalar. The number of iterations in half a cycle. The paper suggests step_size = 2-8 x training iterations in epoch. gamma: constant in 'exp_range' mode: gamma**(global_step) mode: one of {triangular, triangular2, exp_range}. Default 'triangular'. Values correspond to policies detailed above. name: String. Optional name of the operation. Defaults to 'CyclicLearningRate'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The cyclic learning rate. Raises: ValueError: if `global_step` is not supplied. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. 
@end_compatibility """ if global_step is None: raise ValueError("global_step is required for cyclic_learning_rate.") with ops.name_scope(name, "CyclicLearningRate", [learning_rate, global_step]) as name: learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate") dtype = learning_rate.dtype global_step = math_ops.cast(global_step, dtype) step_size = math_ops.cast(step_size, dtype) def cyclic_lr(): """Helper to recompute learning rate; most helpful in eager-mode.""" # computing: cycle = floor( 1 + global_step / ( 2 * step_size ) ) double_step = math_ops.multiply(2., step_size) global_div_double_step = math_ops.divide(global_step, double_step) cycle = math_ops.floor(math_ops.add(1., global_div_double_step)) # computing: x = abs( global_step / step_size – 2 * cycle + 1 ) double_cycle = math_ops.multiply(2., cycle) global_div_step = math_ops.divide(global_step, step_size) tmp = math_ops.subtract(global_div_step, double_cycle) x = math_ops.abs(math_ops.add(1., tmp)) # computing: clr = learning_rate + ( max_lr – learning_rate ) * max( 0, 1 - x ) a1 = math_ops.maximum(0., math_ops.subtract(1., x)) a2 = math_ops.subtract(max_lr, learning_rate) clr = math_ops.multiply(a1, a2) if mode == 'triangular2': clr = math_ops.divide(clr, math_ops.cast(math_ops.pow(2, math_ops.cast( cycle-1, tf.int32)), tf.float32)) if mode == 'exp_range': clr = math_ops.multiply(math_ops.pow(gamma, global_step), clr) return math_ops.add(clr, learning_rate, name=name) if not context.executing_eagerly(): cyclic_lr = cyclic_lr() return cyclic_lr
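# A small numeric sanity check of the triangular schedule described above, written in
# plain Python rather than TensorFlow ops (illustrative only; the values assume the
# defaults learning_rate=0.01, max_lr=0.1, step_size=20 from the function signature).
import math


def clr(global_step, learning_rate=0.01, max_lr=0.1, step_size=20.0):
    cycle = math.floor(1 + global_step / (2 * step_size))
    x = abs(global_step / step_size - 2 * cycle + 1)
    return learning_rate + (max_lr - learning_rate) * max(0.0, 1 - x)


print(clr(0))    # 0.01 -- start of the cycle, lower bound
print(clr(20))   # 0.1  -- mid-cycle, upper bound
print(clr(40))   # 0.01 -- back at the lower bound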
from fastapi.testclient import TestClient from main import app client = TestClient(app) # test default def test_default(): response = client.get("/") assert response.status_code == 200 # test heroes def test_read_heroes(): response = client.get("/heroes") assert response.status_code == 200 def test_read_hero_drow(): response = client.get("/heroes/6") assert response.status_code == 200 assert response.json() == { "name": "npc_dota_hero_drow_ranger", "id": 6, "localized_name": "Drow Ranger" } def test_read_hero_bad_id(): response = client.get("/heroes/118") assert response.status_code == 404 # test items def test_read_items(): response = client.get("/items") assert response.status_code == 200 def test_read_item_blink(): response = client.get("/items/1") assert response.status_code == 200 assert response.json() == { "id": 1, "name": "item_blink", "cost": 2250, "secret_shop": 0, "side_shop": 0, "recipe": 0, "localized_name": "Blink Dagger" } def test_read_item_bad_id(): response = client.get("/items/400") assert response.status_code == 404 # test clusters def test_read_clusters(): response = client.get("/clusters") assert response.status_code == 200 def test_read_cluster_us_east(): response = client.get("/clusters/121") assert response.status_code == 200 assert response.json() == { "cluster": 121, "region": "US EAST" } def test_read_cluster_bad_id(): response = client.get("/clusters/1") assert response.status_code == 404 # test matches def test_read_matches(): response = client.get("/matches") assert response.status_code == 200 def test_read_matches_page(): response = client.get("/matches?sequence_number=4737088502") assert response.status_code == 200 def test_read_match_good(): response = client.get("/matches/5637985930") assert response.status_code == 200 assert response.json() == { "radiant_win": True, "duration": 3421, "pre_game_duration": 90, "start_time": 1601530008, "match_id": 5637985930, "match_seq_num": 4737088312, "tower_status_radiant": 1828, "tower_status_dire": 0, "barracks_status_radiant": 63, "barracks_status_dire": 0, "cluster": 156, "first_blood_time": 179, "lobby_type": 7, "human_players": 10, "leagueid": 0, "positive_votes": 0, "negative_votes": 0, "game_mode": 22, "flags": 1, "engine": 1, "radiant_score": 42, "dire_score": 58 } def test_read_match_bad(): response = client.get("/matches/1") assert response.status_code == 404 def test_read_match_players_good(): response = client.get("/matches/5637985930/players") assert response.status_code == 200 def test_read_match_players_bad(): response = client.get("/matches/1/players") assert response.status_code == 404 # test stats def test_read_stats_items(): response = client.get("/stats/items") assert response.status_code == 200 def test_read_stats_items_drow(): response = client.get("stats/items/6") assert response.status_code == 200 def test_read_stats_items_bad(): response = client.get("stats/items/118") assert response.status_code == 404 def test_read_stats_heroes(): response = client.get("/stats/heroes") assert response.status_code == 200 def test_read_stats_hero_drow(): response = client.get("/stats/heroes/6") assert response.status_code == 200 def test_read_stats_hero_bad(): response = client.get("/stats/heroes/118") assert response.status_code == 404 # test models def test_run_model_supervised(): response = client.get("model/supervised/1/2/3/4/5/6/7/8/9/10/team") assert response.status_code == 200 assert response.json() == { "r_wins": True } or { "r_wins": False } def test_run_model_supervised(): response = 
client.get("model/unsupervised/1/2/3/4/5/6/7/8/9/10/team") assert response.status_code == 200 assert response.json() == { "r_wins": True } or { "r_wins": False } def test_run_model_stacked(): response = client.get("model/stacked/1/2/3/4/5/6/7/8/9/10/team") assert response.status_code == 200 assert response.json() == { "r_wins": True } or { "r_wins": False }
""" Mixins to provide a paginated result set. :date_created: 2021-11-25 """ from typing import Generator, Union from do_py import DataObject, R from do_py.abc import ABCRestrictions from do_py.data_object.validator import Validator from db_able.base_model.database_abc import Database from db_able.client import DBClient from db_able.mgmt.const import PaginationType @ABCRestrictions.require('cursor_key') class ABCPagination(DataObject): """ Interface for nested pagination structures for use with PaginatedData. """ _is_abstract_ = True @classmethod def __compile__(cls): """ Extend compile-time checks to validate `cls.cursor_key` value in `cls._restrictions`. """ super(ABCPagination, cls).__compile__() assert cls.cursor_key in cls._restrictions, \ '{cls_name}.cursor_key="{cursor_key}" must be in {cls_name}._restrictions.'.format( cls_name=cls.__name__, cursor_key=cls.cursor_key ) assert 'has_more' in cls._restrictions or hasattr(cls, 'has_more'), \ '"has_more" must be defined in {cls_name}\'s restrictions or as an attribute'.format( cls_name=cls.__name__ ) assert 'after' in cls._restrictions or hasattr(cls, 'after'), \ '"after" must be defined in {cls_name}\'s restrictions or as an attribute'.format( cls_name=cls.__name__ ) class Pagination(ABCPagination): """ This design suffers from performance issues on large data sets: in MySQL, OFFSET walks through each row it skips. """ _restrictions = { 'page': R.INT.with_default(1), 'page_size': R.INT.with_default(10), 'total': R.INT, } cursor_key = 'page' @property def has_more(self) -> bool: """ :rtype: bool """ return self.page * self.page_size < self.total @property def after(self) -> int: """ :rtype: int """ return self.page + 1 class InfiniteScroll(ABCPagination, Validator): """ This design suffers from UX issues: Skipping through pages cannot be supported, only the next page is available. """ _restrictions = { 'after': R(), # Note: Does not handle encryption/decryption for external exposure. 'has_more': R.BOOL, # 'total': R.INT # Anti-pattern; InfiniteScroll is intended to be performant with large data sets. } cursor_key = 'after' def _validate(self): """ Validate that `self.after` is populated if `self.has_more` is True. """ if self.has_more: assert self.after is not None, 'Expected "after" to be populated when "has_more" is True.' class PaginatedData(Validator): """ Paginated data structure. """ _restrictions = { 'data': R.LIST, # _Listable DataObjects. 'pagination': R() # Pagination or InfiniteScroll DO; validated via `_validate` } def _validate(self): """ Validate `self.data` elements are `_Listable` implementation instances. Validate `self.pagination` is a `ABCPagination` implementation instance. """ assert all(isinstance(datum, _Listable) for datum in self.data), \ '`self.data` must be comprised of _Listable descendents.' assert isinstance(self.pagination, ABCPagination), \ '`self.pagination` type "%s" must be a descendent of `ABCPagination`.' % type(self.pagination) @ABCRestrictions.require('list_params', 'pagination_type', 'pagination_data_cls_ref') class _Listable(Database): """ This is an abstraction for `Paginated` and `Scrollable` mixins, designed to access DB with a standard classmethod action, `list`. Supplants bulk "R" of CRUD. There are two pagination designs: 1. Pagination, with Offset/limit paging implemented 2. Infinite Scroll, with "next page" design using an "after" cursor and "has_more" boolean. """ _is_abstract_ = True @classmethod def __compile__(cls): """ Extend compilation checks to validate defined params. 
""" super(_Listable, cls).__compile__() cls._validate_params('list_params') assert cls.pagination_type in PaginationType.allowed, 'Invalid pagination_type="%s".' % (cls.pagination_type,) assert ABCPagination in cls.pagination_data_cls_ref.mro(), \ 'Invalid pagination_data_cls_ref="%s".' % (cls.pagination_data_cls_ref,) @classmethod def yield_all(cls, **kwargs) -> Generator: """ Wrap `cls.list` to auto-paginate and provide a generator of all results. :param kwargs: refer to `cls.list_params` :rtype: Generator """ cursor_key = cls.pagination_data_cls_ref.cursor_key after = kwargs.pop(cursor_key, cls.pagination_data_cls_ref._restrictions[cursor_key].default) has_more = True while has_more: kwargs[cursor_key] = after paginated_data = cls.list(**kwargs) for datum in paginated_data.data: yield datum has_more = paginated_data.pagination.has_more after = paginated_data.pagination.after class Paginated(_Listable): """ Mixin to support standard pagination design, with offset/limit paging implementation. """ _is_abstract_ = True pagination_type = PaginationType.PAGINATION pagination_data_cls_ref = Pagination @classmethod def __compile__(cls): """ Extend compile-time checks to validate implementation does not use both Scrollable and Paginated. """ super(Paginated, cls).__compile__() assert Scrollable not in cls.mro(), '"Scrollable" and "Paginated" mixins are mutually exclusive.' @classmethod def list(cls, **kwargs) -> PaginatedData: """ List multiple `DataObject` in `PaginatedData` structure. Use `cls.list_params` as kwargs reference. Expects to call the stored procedure: '%s_list' % cls.__name__, i.e. 'MyDataObject_list' Example: >>> from db_able import Paginated, Params >>> from do_py import R >>> >>> class A(Paginated): >>> db = 'schema_name' >>> _restrictions = { >>> 'id': R.INT, >>> 'x': R.INT.with_default(0), >>> 'y': R.INT.with_default(1) >>> } >>> _extra_restrictions = { >>> 'limit': R.INT.with_default(10), >>> 'page': R.INT.with_default(1) >>> } >>> list_params = Params('limit', 'page') # version=2 allows versioning of the SP, i.e. `A_list_v2` >>> >>> a = A.list(limit=10) >>> list(A.yield_all(limit=10)) :param kwargs: refer to `cls.list_params` :rtype: PaginatedData """ stored_procedure = '%s_list%s' % (cls.__name__, cls.list_params.version) validated_args = cls.kwargs_validator(*cls.list_params, **kwargs) with DBClient(cls.db, stored_procedure, *validated_args) as conn: data = [cls(data=row) for row in conn.data] assert conn.next_set(), 'Expected 2 result sets from %s.%s' % (cls.db, stored_procedure) assert conn.data, 'No pagination data found in second result set from %s.%s' % (cls.db, stored_procedure) assert len(conn.data) == 1, \ 'Expected one row from pagination data result set from %s.%s' % (cls.db, stored_procedure) pagination = cls.pagination_data_cls_ref(data=conn.data[0]) return PaginatedData({ 'data': data, 'pagination': pagination }) @ABCRestrictions.require('to_after') class Scrollable(_Listable): """ Mixin to support Infinite Scroll pagination design. :attribute to_after: method to convert self into the appropriate cursor value for `list` stored procedure. """ _is_abstract_ = True pagination_type = PaginationType.INFINITE_SCROLL pagination_data_cls_ref = InfiniteScroll @classmethod def __compile__(cls): """ Extend compile-time checks to: 1. Validate implementation does not use both Scrollable and Paginated. 2. Validate limit restriction is defined. 3. Validate limit is defined in `list_params`. 
""" super(Scrollable, cls).__compile__() assert Paginated not in cls.mro(), '"Scrollable" and "Paginated" mixins are mutually exclusive.' assert 'limit' in cls._restrictions or 'limit' in cls._extra_restrictions, \ '"limit" restriction required for %s.' % cls.__name__ assert 'limit' in cls.list_params, '"limit" param required for %s.list_params' % cls.__name__ @classmethod def list(cls, **kwargs) -> PaginatedData: """ List multiple `DataObject` in `PaginatedData` structure. Use `cls.list_params` as kwargs reference. Expects to call the stored procedure: '%s_list' % cls.__name__, i.e. 'MyDataObject_list' Example: >>> from db_able import Scrollable, Params >>> from do_py import R >>> >>> class A(Scrollable): >>> db = 'schema_name' >>> _restrictions = { >>> 'id': R.INT, >>> 'x': R.INT.with_default(0), >>> 'y': R.INT.with_default(1) >>> } >>> _extra_restrictions = { >>> 'limit': R.INT.with_default(10), >>> 'after': R.NULL_STR >>> } >>> pagination_type = PaginationType.INFINITE_SCROLL >>> list_params = Params('limit', 'after') # version=2 allows versioning of the SP, i.e. `A_list_v2` >>> >>> a = A.list(limit=10) >>> list(A.yield_all(limit=10)) :param kwargs: refer to `cls.list_params` :rtype: PaginatedData """ stored_procedure = '%s_list%s' % (cls.__name__, cls.list_params.version) validated_args = cls.kwargs_validator(*cls.list_params, **kwargs) # Get limit + 1 to fetch one additional row for `has_more` business logic implementation. # Peeling out from validated_args is required to use restriction-defined default limit value. limit = None new_validated_args = [] for key, value in validated_args: if key == 'limit': new_arg = (key, value + 1) limit = value else: new_arg = (key, value) new_validated_args.append(new_arg) with DBClient(cls.db, stored_procedure, *new_validated_args) as conn: pagination = { 'has_more': len(conn.data) > limit, 'after': None } data = [] for row in conn.data: if len(data) < limit: obj = cls(data=row) data.append(obj) pagination['after'] = obj.to_after() else: break return PaginatedData({ 'data': data, 'pagination': cls.pagination_data_cls_ref(pagination) })
from bisect import bisect_left import logging import sys import inspect import traceback import datetime def Indentation(): return ' ' * len(inspect.stack()) # puts traceback into the log def log_assert(bool_, message, g_data): try: assert bool_, message except AssertionError: # construct an exception message from the code of the calling frame last_stackframe = inspect.stack()[-2] source_file, line_no, func = last_stackframe[1:4] source = "Traceback (most recent call last):\n" + \ ' File "%s", line %s, in %s\n ' % (source_file, line_no, func) source_code = open(source_file).readlines() source += "".join(source_code[line_no - 3:line_no + 1]) g_data.TraceError("%s\n%s" % (message, source)) raise AssertionError("%s\n%s" % (message, source)) # does binary search in a sorted alist # def BinarySearch(l, key, index = False): hi = len(l) lo = 0 pos = bisect_left(l,(key,None),lo,hi) if index: return pos if pos != hi and l[pos][0] == key: return l[pos][1] if pos != 0 and l[pos-1][0] == key: return l[pos-1][1] return None def DictToSortedTuple(d): return tuple(sorted(d.iteritems())) def ListInsert(dict, k, v): if k not in dict: dict[k] = [v] else: dict[k].append(v) def NotNone(arg1, arg2): return arg1 if not arg1 is None else arg2 def Median(nums): return sorted(nums)[len(nums)/2] def Differences(nums): nums = sorted(nums) result = [] for i in xrange(len(nums)-1): result.append(nums[i+1] - nums[i]) return result def exceptionTrace(exctype, value, tb): logger = logging.getLogger('TiaraBoom') logger.error('I seem to have crashed with an exception') logger.error('Type: %s' % str(exctype)) logger.error('Value: %s' % str(value)) logger.error('Traceback:\n%s' % "".join(traceback.format_tb(tb))) def ToWordForm(g_data,word,form): return [w for w, t in g_data.FamilyLookup(word) if t == form] def LKD(d,a,r=None): return d[a] if a in d else r def LKDI(d,a,r): try: return int(LKD(d,a,r)) except Exception: return r def LKDB(d,a,r): try: if str(LKD(d,a,r)).lower() == str(r).lower(): return r return not r except Exception: return r def LKDT(d,a): return LKD(d,a,True) def LKDF(d,a): return LKD(d,a,True) def LKD0(d,a): return LKD(d,a,0) def LKDS(d,a): return LKD(d,a,"") def LKDL(d,a): return LKD(d,a,[]) def SKD(a,d,r): if not a in d: d[a] = r def joinit(delimiter, iterable): it = iter(iterable) yield next(it) for x in it: yield delimiter yield x def FormatResponse(tweet,response): return '@' + tweet.GetUser().GetScreenName() + ": " + response def Const(x): return lambda g_data, args: x ConstTrue = Const(True) def GetURL(status): return "https://twitter.com/%s/status/%d" % (status.GetUser().GetScreenName(), status.GetId()) # looks like Fri Jan 02 03:14:31 +0000 2015 def TwitterTimestampToMySQL(ts): ts = ts.split() assert ts[0] in ["Mon","Tue","Wed","Thu","Fri","Sat","Sun"], ts mon = str(1 + ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"].index(ts[1])) if len(mon) == 1: mon = "0" + mon day = ts[2] time = ts[3] assert ts[4] == "+0000", ts year = ts[5] return "%s-%s-%s %s" % (year,mon,day,time) # looks like 2008-09-15 00:15:03 def MySQLTimestampToTwitter(msts): if msts == "0000-00-00 00:00:00": return None ts = MySQLTimestampToPython(msts) dow = ["Mon","Tue","Wed","Thu","Fri","Sat","Sun"][ts.weekday()] mon = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][ts.month-1] def pad(a): if len(a) == 1: return "0" + a return a day = pad("%d" % ts.day) hour= pad("%d" % ts.hour) minute = pad("%d" % ts.minute) second = pad("%d" % ts.second) year = "%d" % 
ts.year result = "%s %s %s %s:%s:%s +0000 %s" % (dow,mon,day,hour,minute,second,year) assert TwitterTimestampToMySQL(result) == msts, (result,msts,TwitterTimestampToMySQL(result)) return result def MySQLTimestampToPython(ts): return datetime.datetime.strptime(ts, "%Y-%m-%d %H:%M:%S") def OlderThan(ts,days): return (ts.now() - ts) > datetime.timedelta(days,0,0) def Decay(x): return max(0,3 - 0.5**(x-2)) def Int(x): if x is None: return None return int(x) def ImageURLToTuple(url): assert url.startswith("https://pbs.twimg.com/profile_images/"), url url = url[len("https://pbs.twimg.com/profile_images/"):] url = url.split("/") assert len(url) == 2, url assert url[1].endswith("_normal.jpg"), url return int(url[0]), url[1][:-len("_normal.jpg")] def TupleToImageURL(imid, hsh): if str(imid) == "0": return "https://abs.twimg.com/sticky/default_profile_images/default_profile_2_normal.png" return "https://pbs.twimg.com/profile_images/" + str(imid) + "/" + hsh + "_normal.jpg"
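# Illustrative round trip between the two timestamp formats handled above; the example
# timestamp is the one quoted in the comments.
mysql_ts = TwitterTimestampToMySQL("Fri Jan 02 03:14:31 +0000 2015")
print(mysql_ts)                           # 2015-01-02 03:14:31
print(MySQLTimestampToTwitter(mysql_ts))  # Fri Jan 02 03:14:31 +0000 2015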
def aumentar(moeda, taxa, formato=False):
    aum = moeda + (moeda * taxa / 100)
    # return format(aum) would always format the result, without letting the caller choose
    return aum if formato is False else format(aum)


def diminuir(moeda, taxa, formato=False):
    dim = moeda - (moeda * taxa / 100)
    # return format(dim)
    if formato is False:
        return dim
    else:
        return format(dim)


def dobro(moeda, formato=False):
    dob = moeda * 2
    # return format(dob)
    if not formato:
        return dob
    else:
        return format(dob)


def metade(moeda, formato=False):
    met = moeda / 2
    # return format(met)
    if formato == False:
        return met
    else:
        return format(met)


def format(preço=0, moeda='R$'):
    return f'{moeda}{preço:.2f}'.replace('.', ',')
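# Hypothetical usage of the helpers above (the starting value is chosen for illustration only).
preço = 100.0
print(aumentar(preço, 10))        # 110.0
print(diminuir(preço, 10, True))  # R$90,00
print(dobro(preço, True))         # R$200,00
print(metade(preço))              # 50.0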
n, k, s = map(int, input().split())
a = [s] * k
a += [9857349] * (n - k)
print(*a, sep=' ')
# https://oj.leetcode.com/problems/letter-combinations-of-a-phone-number/
class Solution:
    # @return a list of strings, [s1, s2]
    def letterCombinations(self, digits):
        result = ['']
        letters = ["abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
        for i in xrange(len(digits)):
            nextResult, digit = [], int(digits[i])
            for j in xrange(len(result)):
                if digit > 1 and digit <= 9:
                    for letter in letters[digit - 2]:
                        nextResult.append(result[j] + letter)
                else:
                    # add the original strings
                    nextResult.append(result[j])
            # get new result
            result = nextResult
        return result


s = Solution()
print s.letterCombinations("23"), ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]
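# The same mapping expressed with itertools.product (a Python 3 sketch), shown only to
# illustrate that the nested loops above compute a cartesian product over the digit groups;
# this is not part of the original solution file.
from itertools import product


def letter_combinations(digits):
    letters = ["abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
    groups = [letters[int(d) - 2] for d in digits if '2' <= d <= '9']
    if not groups:
        return ['']
    return [''.join(p) for p in product(*groups)]


print(letter_combinations("23"))  # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']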
__author__ = 'mpetyx' import logging from django.views.decorators.csrf import csrf_exempt from django.http import HttpResponse from pyapi import API import requests @csrf_exempt def transform(request): if request.method == 'POST' or request.method == 'GET': # api = request.POST.get('api','') location = request.GET.get('location', '') original_format = request.GET.get('original_format', '') to_format = request.GET.get('to_format', '') # location = "http://imagine.epu.ntua.gr:1988/api/doc/schema/Account/" # original_format = "swagger" # to_format = "raml" # api = "123" if "api-docs.json" in location: # openi_server_url = "http://imagine.epu.ntua.gr:1988/api/doc/resources/" openi_server_url = "http://api-builder.tools.epu.ntua.gr/web/api-docs/Core/api-docs.json" schema = "http://api-builder.tools.epu.ntua.gr/web/api-docs/Core" server = requests.get(location) objects = server.json()['apis'] schema = server.json()['basePath'] apis = [] api_framework = API() # language = request.GET.get('to_format', '') # serialisation_format = request.GET.get("serialisation_format", '') for object in objects: logging.info("Accessing Object: " + str(schema + object['path'])) api_framework.parse(location=schema + object['path'], language="swagger") apis.append(api_framework.serialise(language=to_format)) return HttpResponse(apis, status=201) else: api_framework = API() api_framework.parse(location=location, language=original_format) api_framework.serialise(to_format) if not location: return HttpResponse({"Please provide a Valid API!!"}, status=401) else: return HttpResponse(api_framework.serialise(to_format), status=201) else: return HttpResponse(status=405) @csrf_exempt def openi(request): if request.method == 'POST' or request.method == 'GET': # openi_server_url = "http://imagine.epu.ntua.gr:1988/api/doc/resources/" openi_server_url = "http://api-builder.tools.epu.ntua.gr/web/api-docs/Core/api-docs.json" schema = "http://api-builder.tools.epu.ntua.gr/web/api-docs/Core" server = requests.get(openi_server_url) objects = server.json()['apis'] apis = [] api_framework = API() language = request.GET.get('to_format', '') serialisation_format = request.GET.get("serialisation_format", '') if serialisation_format == '': serialisation_format = None for object in objects: logging.info("Accessing Object: " + str(schema + object['path'])) api_framework.parse(location=schema + object['path'], language="swagger") apis.append(api_framework.serialise(language=language, format=serialisation_format)) return HttpResponse(apis, status=201) else: return HttpResponse(status=405)
_base_ = [
    '../spos/spos_subnet_shufflenetv2_8xb128_in1k.py',
]
# Author: Eric Alcaide import torch import torch.nn.functional as F from einops import rearrange, repeat # models from clynmut.utils import * import alphafold2_pytorch.utils as af2utils # import esm # after installing esm # (e-)swish activation(s) # https://arxiv.org/abs/1801.07145 class e_Swish_(torch.nn.Module): def forward(self, x, beta=1.1): return beta * x * x.sigmoid() SiLU = e_Swish_ class Net_3d(torch.nn.Module): """ Gets an embedding from a 3d structure. Not an autoencoder, just a specific encoder for this usecase. Will likely use GVP or E(n)-GNN: https://github.com/lucidrains/geometric-vector-perceptron/ """ def __init__(self): return def forward(self, coords, cloud_mask): """ Gets an embedding from a 3d structure. """ return class Hier_CLF(torch.nn.Module): """ Hierarchical classification/regression module. """ def __init__(self, hier_graph={}, hidden_dim=None): self.hier_graph = hier_graph self.hier_scaff = Hier_Helper(hier_graph) self.hidden_dim = hidden_dim self.arch = [] # build node MLPs for i,node in enumerate(self.hier_scaff.nodes): dims_in = self.hier_scaf.max_width if i!=0 else self.hidden_dim dims_out = self.hier_scaf.max_width self.arch.append({"class": node["class"], "hidden": torch.nn.Sequential( torch.nn.Linear(dims_in, dims_out), SiLU(), ), "clf": torch.nn.Sequential( torch.nn.Linear(dims_out, dims_out) ) }) def forward(self, x, pred_format="dict"): """ The custom architecture for a hierarchical classification. Defines the MLPs and final gaussian processes for each node. Inputs: * x: (batch, hidden) tensor * pred_format: one of ["dict", "tensor"] """ full_pred = self.hier_scaff.dag(x, self.arch) if pred_format == "dict": pred_dict = self.hier_scaff.full2dict(full_pred) return full_pred class MutPredict(torch.nn.Module): def __init__(self, seq_embedd_dim = 1280, # struct_embedd_dim = 256, seq_reason_dim = 128, struct_reason_dim = 128, hier_graph = {}, dropout = 0.0, use_msa = False, msa_max_seq = 256, # max number of MSA sequences to read. device = None): """ Predicts the phenotypical impact of mutations. """ self.seq_embedd_dim = seq_embedd_dim self.seq_reason_dim = seq_reason_dim self.struct_embedd_dim = struct_embedd_dim self.struct_reason_dim = struct_reason_dim # take same value for the 3 parts if no list is passed. 
self.dropout = [dropout]*3 if isinstance(dropout, float) else dropout self.hier_graph = hier_graph # nlp arch - no gradients here self.use_msa = use_msa self.msa_max_seq = msa_max_seq if use_msa: ##  alternatively do # import esm # after installing esm # embedd_model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S() embedd_model, alphabet = torch.hub.load("facebookresearch/esm", "esm1b_t33_650M_UR50S") batch_converter = alphabet.get_batch_converter() else: ##  alternatively do # embedd_model, alphabet = esm.pretrained.esm_msa1_t12_100M_UR50S() embedd_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S") batch_converter = alphabet.get_batch_converter() self.nlp_stuff = [embedd_model, alphabet, batch_converter] self.seq_embedder = partial(embedd_seq_batch, embedd_model=embedd_model, batch_converter=batch_converter) # 3d module self.struct_embedder = Net_3d() # reasoning modules self.nlp_mlp = torch.nn.Sequential( torch.nn.Linear(2*seq_embedd_dim, seq_reason_dim*2), torch.nn.Dropout(self.dropout[0]), SiLU(), torch.nn.Linear(seq_reason_dim * 2, seq_reason_dim), torch.nn.Dropout(self.dropout[0]), SiLU(), ) self.struct_mlp = torch.nn.Sequential( torch.nn.Linear(struct_reason_dim, struct_reason_dim*2), torch.nn.Dropout(self.dropout[1]), SiLU(), torch.nn.Linear(struct_reason_dim * 2, struct_reason_dim), torch.nn.Dropout(self.dropout[1]), SiLU(), ) self.common_mlp = torch.nn.Sequential( torch.nn.Linear(struct_reason_dim + seq_reason_dim, struct_reason_dim + seq_reason_dim), torch.nn.Dropout(self.dropout[-1]), SiLU() ) # classifier self.hier_clf = Hier_CLF(hier_graph, hidden_dim=struct_reason_dim+seq_reason_dim) return def forward(self, seqs, msa_routes=None, coords=None, cloud_mask=None, pred_format="dict", info=None, verbose=0): """ Predicts the mutation effect in a protein. Inputs: * seqs: (2, b) list of pairs (wt and mut) of strings. Sequences in 1-letter AA code. * msas: (2, b) list of pairs (wt and mut) of routes to msa files . * coords: (b, l, c, 3) coords array in sidechainnet format * cloud_mask: (b, l, c) boolean mask on actual points from coords * pred_format: one of ["dict", "tensor"] * info: any info required. * verbose: int. verbosity level (0-silent, 1-minimal, 2-full) """ scaffold = torch.zeros(len(seqs), self.seq_reason_dim+self.struct_reason_dim) # NLP # MSATransformer if possible if msas is not None: wt_seq_data = [ af2utils.read_msa( filename=msa_route, nseq=self.msa_max_seq ) \ for msa_route in msa_routes[0]] mut_seq_data = [ af2utils.read_msa( filename=msa_route, nseq=self.msa_max_seq ) \ for msa_route in msa_routes[1]] else: wt_seq_data, mut_seq_data = None, None wt_seq_embedds = self.seq_embedder(seqs[0], wt_seq_data) # (batch, embedd_size) mut_seq_embedds = self.seq_embedder(seqs[1], mut_seq_data) # (batch, embedd_size) # reason the embedding seq_embedds = torch.cat([wt_seq_embedds, mut_seq_embedds], dim=-1) scaffold[:, :-self.struct_reason_dim] = self.nlp_mlp(seq_embedds) # 3D # only do if passed if coords is not None and cloud_mask is not None: struct_embedds = self.struct_embedder(coords, cloud_mask) scaffold[:, -self.struct_reason_dim:] = self.struct_mlp(struct_embedds) # common step x = self.common_mlp(scaffold) return self.hier_clf(x, pred_format=pred_format) def __repr__(self): return "ClynMut model with following args: "+str(self.__dict__)
#========================================================================= # This script creates LISA DWD galaxies across 15 metallicity bins, # incorporating the metallicity-dependent binary fraction as # discussed in Thiele et al. (2021). # # Authors: Sarah Thiele & Katelyn Breivik # Last updated: Oct 14th, 2021 #========================================================================= import numpy as np from astropy import constants as const from astropy import units as u import astropy.coordinates as coords from astropy.time import Time import argparse import postproc as pp DWD_list = ['He_He', 'CO_He', 'CO_CO', 'ONe_X'] dat_path = "../data/" FIRE_path = "../data/" models = ['fiducial', 'alpha25', 'alpha5', 'q3'] interfile = False nproc = 4 for model in models: pp.save_full_galaxy( DWD_list, dat_path, FIRE_path, dat_path, interfile, model, nproc ) print('Gx done!') if model == 'fiducial': pp.get_formeff( pathtodat=dat_path, pathtosave=dat_path ) print('formation efficiency done') pp.get_interactionsep_and_numLISA( pathtodat=dat_path, pathtosave=dat_path, model=model, var=True ) print('interaction sep FZ done') pp.get_interactionsep_and_numLISA( pathtodat=dat_path, pathtosave=dat_path, model=model, var=False ) print('interaction sep F50 done') pp.get_resolvedDWDs( dat_path, dat_path, var=True, model=model, window=1000 ) print('resolved FZ done') pp.get_resolvedDWDs( dat_path, dat_path, var=False, model=model, window=1000 ) print('resolved F50 done')
""" 这是我的作业 """ #算数运算符 a=21 b=10 c=0 c=a+b print("Line1-Value of c is" ,c) c=a-b print ("Line2-Value of c is" ,c) c=a*b print("Line3-Value of c is",c) c=a/b print("Line4-Value of c is",c) c=a%b print ("Line5-Value of c is",c) a=2 b=3 c=a**b print("Line6-Value of c is",c) a=11 b=5 c=a//b print("Line7-Value of c is",c) #比较运算符 print(2!=9)
''' Created on Oct 3, 2021 @author: immanueltrummer ''' import openai class CodeGenerator(): """ Generates code using Codex. """ def __init__(self, prompts): """ Initializes search space. Args: prompts: JSON object configuring prompts """ self.prompts = prompts def generate( self, context, p_type, schema, files, from_lang, to_lang, task, use_examples=True, tactics_p=None, strategy=None): """ Generate a piece of code solving specified task. Args: context: text snippets for prompt prefix p_type: task type ('query' vs. 'transform') schema: JSON description of database schema files: names of files storing tables from_lang: query language to_lang: query processing language use_examples: whether to use example queries task: task description in source language tactics_p: assigns each tactics to priority strategy: high-level processing strategy """ print(f'Generating code {p_type} from {from_lang} to {to_lang}') if from_lang == to_lang: return task elif to_lang == 'dummy': return '' from_lang = 'from_' + from_lang to_lang = 'to_' + to_lang sample_parts = [] if use_examples: sample_dbs = self.prompts['sample_databases'] if from_lang in self.prompts[p_type]: from_content = self.prompts[p_type][from_lang] else: from_content = self.prompts[p_type]['from_*'] sample_tasks = from_content['sample_tasks'] solution_links = from_content[to_lang]['sample_solution_links'] sample_solutions = [] for l in solution_links: with open(l) as file: sample_solution = file.read() sample_solutions.append(sample_solution) for sample_task, solution in zip(sample_tasks, sample_solutions): sample_text = sample_task['task'] sample_db_id = sample_task['db_id'] sample_db = sample_dbs[sample_db_id] sample_tables = sample_db['table_names_original'] sample_files = [f'{t}.csv' for t in sample_tables] sample_prompt = self._prompt( p_type, sample_db, sample_files, from_lang, to_lang, sample_text, tactics_p, strategy) sample_parts.append(sample_prompt) sample_parts.append(solution) # last_prompt = self._prompt( # p_type, schema, files, from_lang, # to_lang, task, tactics_p, strategy) # prompt = '\n'.join(context) + \ # '\n'.join(sample_parts[0:2]) + \ # '\n' + last_prompt last_prompt = self._prompt( p_type, schema, files, from_lang, to_lang, task, tactics_p, strategy) prompt = '\n'.join(context) + \ '\n'.join(sample_parts) + \ '\n' + last_prompt snippets = self._snippets(p_type, from_lang, to_lang) marker = snippets['marker'] completion = self._complete(prompt, marker) return completion.replace(marker, '') def _complete(self, prompt, marker): """ Complete prompt using Codex. Args: prompt: initiate generation with this prompt marker: generation stops at marker text Returns: generated code, following prompt """ try: print(f'\nPrompt:\n*******\n{prompt}\n*******') response = openai.Completion.create( engine='davinci-codex', prompt=prompt, temperature=0, max_tokens=400, stop=marker) return response['choices'][0]['text'] except Exception as e: print(f'Error querying Codex: {e}') return '' def _db_info(self, schema, files): """ Generate description of database. 
Args: schema: description of database schema files: names to files storing tables Returns: list of description lines """ lines = [] tables = schema['table_names_original'] all_columns = schema['column_names_original'] nr_tables = len(tables) for tbl_idx in range(nr_tables): filename = files[tbl_idx] tbl_name = tables[tbl_idx] tbl_columns = [c[1] for c in all_columns if c[0] == tbl_idx] col_list = ', '.join(tbl_columns) line = f'Table {tbl_name} with columns {col_list}, ' \ f'stored in {filename}.' lines.append(line) return lines def _eligible_tactics(self, tactics, precedence, used): """ Determine eligible next tactics. Args: tactics: available tactics precedence: precedence constraints used: tactics already used Returns: list of usable tactics IDs """ nr_tactics = len(tactics) usable = set(range(nr_tactics)) usable = usable.difference(used) for c in precedence: if c['F'] not in used: usable.discard(c['S']) return usable def _plan(self, tactics, precedence, tactics_p): """ Generate list of ordered tactics. Args: tactics: list of available tactics precedence: ordering constraints tactics_p: priorities for tactics Returns: ordered list of tactics """ ordered_ts = [] used = set() while (self._eligible_tactics(tactics, precedence, used)): usable = self._eligible_tactics(tactics, precedence, used) use = max(usable, key=lambda t_id:tactics_p[t_id]) used.add(use) if tactics_p[use] > 0: ordered_ts.append(tactics[use]) # attach default steps ordered_ts = ['Load data for all relevant tables.'] + ordered_ts ordered_ts = ordered_ts + ["Write query results to file 'result.csv'."] return ordered_ts def _prompt( self, p_type, schema, files, from_lang, to_lang, task, tactics_p=None, strategy=None): """ Generate a prompt initiating code generation. Args: p_type: task type ('query' vs. 'transform') schema: JSON description of database schema files: names of files storing tables from_lang: query language to_lang: query processing language task: task description in source language tactics_p: assigns each tactics to priority strategy: high-level processing strategy """ print(f'Prompt for {p_type} from {from_lang} to {to_lang}') if to_lang == 'to_dummy': return '' tactics = self.prompts[p_type]['tactics'] precedence = self.prompts[p_type]['precedence'] snippets = self._snippets(p_type, from_lang, to_lang) nr_tactics = len(tactics) if tactics_p is None: tactics_p = [1] * nr_tactics if strategy is None: strategy = '' line_pre = snippets['linepre'] ordered_ts = self._plan(tactics, precedence, tactics_p) plan_lines = [f'{l_id+1}. {l}' for l_id, l in enumerate(ordered_ts)] plan = '\n'.join([line_pre + t for t in plan_lines]) plan = plan.replace('<strategy>', strategy) db_lines = self._db_info(schema, files) db_info = '\n'.join([line_pre + l for l in db_lines]) prompt = snippets['template'] prompt = prompt.replace('<plan>', plan) prompt = prompt.replace('<task>', task) prompt = prompt.replace('<database>', db_info) return prompt def _snippets(self, p_type, from_lang, to_lang): """ Return snippets for most specific source language. 
        Args:
            p_type: type of prompt (i.e., processing stage)
            from_lang: translate query from this language
            to_lang: execute query using this language

        Returns:
            prompt snippets for specified source language or generalization
        """
        if from_lang in self.prompts[p_type]:
            return self.prompts[p_type][from_lang][to_lang]
        else:
            return self.prompts[p_type]['from_*'][to_lang]


if __name__ == '__main__':
    import json

    # The constructor expects the parsed prompts configuration, which is exposed as
    # `generator.prompts` (there is no `space` attribute on CodeGenerator).
    with open('config/spaces.json') as f:
        prompts = json.load(f)
    generator = CodeGenerator(prompts)
    print(generator.prompts)
    print(generator.prompts['query']['from_nl']['to_cpp']['sample_solutions'])
import threading import queue import can import time q_logs = queue.Queue() class Listener(threading.Thread): def __init__(self, end_flag): threading.Thread.__init__(self) self.ende=end_flag self.bus = can.interface.Bus("vcan0", bustype="socketcan_native") def run(self): while not self.ende.isSet(): mesg=self.bus.recv(0) # print(mesg) q_logs.put(mesg) class Printer(threading.Thread): def __init__(self,logfile, end_flag): threading.Thread.__init__(self) self.ende=end_flag self.logfile = logfile print(logfile) def run(self): while not self.ende.isSet(): while not q_logs.empty(): mesg=q_logs.get() # print(mesg) if mesg != None: self.logfile.write(str(mesg)) self.logfile.write("\n") class csvPrinter(threading.Thread): def __init__(self,logfile,names, end_flag): threading.Thread.__init__(self) self.ende=end_flag self.logfile = logfile self.logfile.write(','.join(names) + "\n") def run(self): while not self.ende.isSet(): while not q_logs.empty(): msg=q_logs.get() # print(mesg) if msg != None: arg_list = [msg.timestamp, msg.arbitration_id, msg.dlc, msg.data] print (msg.data) row = ','.join(map(str,([msg.timestamp, msg.arbitration_id, msg.dlc, msg.data[0], msg.data[1] ] ))) self.logfile.write(row + "\n") if __name__ == '__main__': end_Flag = threading.Event() logs = open('test.csv', 'w') names = ["acc", "temp", "gyro"] Listen_Thread = Listener(end_Flag) Print_Thread = csvPrinter(logs,names, end_Flag) Listen_Thread.start() Print_Thread.start() time.sleep(10) end_Flag.set() Print_Thread.join() logs.close()
import pytest

from rubrix.server.commons.errors import ForbiddenOperationError
from rubrix.server.security.model import User


def test_check_user_teams():
    a_team = "A-team"
    expected_teams = [a_team, "B-team"]
    user = User(username="test-user", teams=[a_team, "B-team", "C-team"])

    assert user.check_team(a_team) == a_team
    assert user.check_teams(expected_teams) == expected_teams
    with pytest.raises(ForbiddenOperationError):
        assert user.check_teams(["not-found-team"])


def test_default_team():
    user = User(username="admin")
    assert user.default_team is None

    test_user = User(username="test", teams=["team"])
    assert test_user.default_team == test_user.username


@pytest.mark.parametrize(
    "teams, expected",
    [
        (None, []),
        (["a"], ["a", "user"]),
        ([], ["user"]),
    ],
)
def test_check_team_with_default(teams, expected):
    user = User(username="user", teams=teams)
    assert user.check_teams([]) == expected
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-05 01:20
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('private_sharing', '0007_auto_20171220_2038'),
    ]

    operations = [
        migrations.CreateModel(
            name='FeaturedProject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='private_sharing.DataRequestProject')),
            ],
        ),
    ]
# import the necessary packages import numpy as np import cv2 #defining prototext and caffemodel paths def load_model(prototextPath, caffeModel): return cv2.dnn.readNet(prototextPath, caffeModel) def get_detection(face_detector, image): (h,w) = image.shape[:2] # blobImage convert RGB (104.0, 177.0, 123.0) blob = cv2.dnn.blobFromImage(cv2.resize(image,(300,300)),1.0,(300,300),(104.0, 177.0, 123.0)) face_detector.setInput(blob) detections = face_detector.forward() return detections def get_face_pixels(image, detections, threshold=0.5): # loop over the detections (h,w) = image.shape[:2] bb_pixels = [] bb_pixels_coord = [] for i in range(0, detections.shape[2]): # extract the confidence and prediction confidence = detections[0, 0, i, 2] # filter detections by confidence greater than the minimum confidence if confidence > threshold: # compute the (x, y)-coordinates of the bounding box for the # object box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) (startX, startY, endX, endY) = box.astype("int") # ensure the bounding boxes fall within the dimensions of # the frame (startX, startY) = (max(0, startX), max(0, startY)) (endX, endY) = (min(w - 1, endX), min(h - 1, endY)) pixels = image[startY:endY, startX:endX, :] pixels = cv2.cvtColor(pixels, cv2.COLOR_BGR2RGB) bb_pixels.append(pixels) bb_pixels_coord.append([startX, startY, endX, endY]) return bb_pixels_coord, bb_pixels
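# Hypothetical end-to-end usage of the helpers above; the file names are placeholders for
# an OpenCV SSD face detector's prototxt/caffemodel pair and a local test image.
if __name__ == "__main__":
    detector = load_model("deploy.prototxt", "res10_300x300_ssd_iter_140000.caffemodel")
    image = cv2.imread("example.jpg")
    detections = get_detection(detector, image)
    boxes, faces = get_face_pixels(image, detections, threshold=0.5)
    for (startX, startY, endX, endY), face in zip(boxes, faces):
        print("face at", (startX, startY, endX, endY), "crop shape", face.shape)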
""" Copyright (c) 2015-present, Philippine-California Advanced Research Institutes- The Village Base Station Project (PCARI-VBTS). All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. """ from .base import * DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'databases/pcari.db'), }, 'vbts_subscribers': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': '/var/lib/asterisk/sqlite3dir/sqlite3.db', } } STATIC_ROOT = 'static'
import torch


class TLayer(torch.nn.Module):
    def __init__(self, K=3, bias=False):
        super(TLayer, self).__init__()
        # Learnable K x K transform, initialised to the identity matrix.
        self.weight = torch.nn.Parameter(data=torch.eye(K, dtype=torch.float, requires_grad=True),
                                         requires_grad=True)
        if bias:
            self.bias = torch.nn.Parameter(data=torch.eye(K, dtype=torch.float, requires_grad=True),
                                           requires_grad=True)
        else:
            self.register_parameter('bias', None)
        self.init_parameters()

    def init_parameters(self):
        # Zero the bias so the layer initially acts as a pure identity transform.
        if self.bias is not None:
            self.bias.data.fill_(0)

    def forward(self, x):
        out = torch.matmul(x, self.weight)
        # The bias parameter is optional; only add it when it was registered.
        if self.bias is not None:
            out = out + self.bias
        return out
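# Illustrative use of TLayer on a batch of 3-D point clouds (shapes chosen to match the
# default K=3; not part of the original module).
points = torch.randn(8, 1024, 3)   # (batch, num_points, K)
layer = TLayer(K=3)                # default: no bias
print(layer(points).shape)         # torch.Size([8, 1024, 3])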
# Generated by Django 3.1.4 on 2020-12-31 05:55

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('index', '0002_auto_20201231_0522'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image_to_json',
            name='data',
            field=models.JSONField(default={}),
        ),
    ]
from seesaw.metrics import * def test_average_precision(): # perfect case AP = average_precision(np.array([0, 1, 2]), nseen=10, npositive=3) assert AP == 1.0 # nothing found case AP = average_precision(np.array([]), nseen=10, npositive=3) assert AP == 0.0 # perfect case one elt AP = average_precision(np.array([0]), nseen=10, npositive=1) assert AP == 1.0 # imperfect case: missing some AP_0 = average_precision(np.array([0, 1, 2]), nseen=10, npositive=10) assert AP_0 == 3.0 / 10 # imperfect case: some false positives first AP_1 = average_precision(np.array([1, 2, 3]), nseen=10, npositive=3) assert AP_1 == (1.0 / 2 + 2.0 / 3 + 3.0 / 4) / 3.0 # both kinds of imperfections: AP_01 = average_precision(np.array([1, 2, 3]), nseen=10, npositive=10) assert AP_01 == (1.0 / 2 + 2.0 / 3 + 3.0 / 4) / 10.0 assert AP_01 < AP_0 assert AP_01 < AP_1 def test_average_precision_max_results(): AP = average_precision(np.array([0, 1, 2]), nseen=10, npositive=3, max_results=1) assert AP < 1 nAP = normalizedAP(np.array([0, 1, 2]), nseen=10, npositive=3, max_results=1) assert nAP == 1.0 nAP = normalizedAP(np.array([0, 1, 2]), nseen=10, npositive=3, max_results=2) assert nAP == 1.0 nAP = normalizedAP(np.array([0, 1, 2]), nseen=10, npositive=3, max_results=3) assert nAP == 1.0 nAP = normalizedAP(np.array([0, 1, 2]), nseen=10, npositive=3, max_results=4) assert nAP == 1.0 def test_ndcg(): ndcg = ndcg_score(np.array([0, 1, 2]), nseen=10, npositive=3) assert ndcg == 1.0 ndcg = ndcg_score(np.array([]), nseen=10, npositive=3) assert ndcg == 0.0 # perfect case one element ndcg = ndcg_score(np.array([0]), nseen=10, npositive=1) assert ndcg == 1.0 # imperfect case: missing some ndcg_0 = ndcg_score(np.array([0, 1, 2]), nseen=10, npositive=4) assert ndcg_0 < 1.0 # imperfect case: not first ndcg_1 = ndcg_score(np.array([1, 2, 3]), nseen=10, npositive=3) assert ndcg_1 < 1.0 # imperfect case: both ndcg_01 = ndcg_score(np.array([1, 2, 3]), nseen=10, npositive=4) assert ndcg_01 < ndcg_0 assert ndcg_01 < ndcg_1 # unnormalized. check index 0 is handled properly dcg = dcg_score(np.array([0])) assert dcg == 1.0 def test_rank_of_kth(): tt = rank_of_kth(np.array([0]), k=1) assert tt == 1.0 tt = rank_of_kth(np.array([]), k=1) assert tt == math.inf tt = rank_of_kth(np.array([0]), k=2) assert tt == math.inf tt = rank_of_kth(np.array([1, 2, 3]), k=1) assert tt == 2 tt = rank_of_kth(np.array([1, 2, 3]), k=2) assert tt == 3
def pattern_fourteen(strings):
    '''Pattern fourteen

    K
     A
      T
       H
        M
         A
          N
           D
            U
    '''
    if not str(strings).isalpha():
        strings = str(strings)  # If provided is integer then converting to string

    def method_one(strings):
        for x in range(len(strings)):
            print('{}{}'.format(' ' * x, strings[x]))

    def method_two(strings):
        for x in range(len(strings)):
            print(strings[x].rjust(x + 1))

    print('Method One\n')
    method_one(strings)
    print('\n\nMethod Two\n')
    method_two(strings)


if __name__ == '__main__':
    try:
        pattern_fourteen('KATHMANDU')
    except NameError:
        print('String or Integer was expected')
from distutils.core import setup
from setuptools import find_packages

desc = """Declarative processing, transforming, and validating of data.

New in 1.0.5:
- Enhanced error handling with workers and Streams (issue 14)
"""

kwargs = {
    "name": "fulford.data",
    "description": desc,
    "author": "James Patrick Fulford",
    "author_email": "[email protected]",
    "url": "https://github.com/jamesfulford/fulford.data",
    "license": "Apache-2.0",
    "version": "1.0.5",
    "packages": find_packages()
}

setup(
    **kwargs
)
from fred import Fred from dotenv import load_dotenv import os import series_id_defs load_dotenv() fr = Fred(api_key=os.getenv("FRED_API_KEY"), response_type="json") def generate_interest_rate(start_date, country): params = { "observation_start": start_date, } indicator_data = {} if country == "Switzerland": indicator_data = fr.series.observations(series_id_defs.SWITZERLAND["interest_rate_id"], params=params) elif country == "Euro": indicator_data = fr.series.observations(series_id_defs.EURO["interest_rate_id"], params=params) elif country == "Canada": indicator_data = fr.series.observations(series_id_defs.CANADA["interest_rate_id"], params=params) elif country == "USA": indicator_data = fr.series.observations(series_id_defs.USA["interest_rate_id"], params=params) elif country == "Australia": indicator_data = fr.series.observations(series_id_defs.AUSTRALIA["interest_rate_id"], params=params) elif country == "Japan": indicator_data = fr.series.observations(series_id_defs.JAPAN["interest_rate_id"], params=params) elif country == "UK": indicator_data = fr.series.observations(series_id_defs.UK["interest_rate_id"], params=params) elif country == "New Zealand": indicator_data = fr.series.observations(series_id_defs.NEW_ZEALAND["interest_rate_id"], params=params) else: raise Exception("No interest rate data for", country) interest_rate = [] new_observations = indicator_data["observations"] for i in range(len(new_observations)): interest_rate.append({"date": new_observations[i]["date"], "value": new_observations[i]["value"]}) return interest_rate def generate_cpi(start_date, country): params = { "observation_start": start_date, } indicator_data = {} if country == "Switzerland": indicator_data = fr.series.observations(series_id_defs.SWITZERLAND["cpi_id"], params=params) elif country == "Euro": indicator_data = fr.series.observations(series_id_defs.EURO["cpi_id"], params=params) elif country == "Canada": indicator_data = fr.series.observations(series_id_defs.CANADA["cpi_id"], params=params) elif country == "USA": indicator_data = fr.series.observations(series_id_defs.USA["cpi_id"], params=params) elif country == "Australia": indicator_data = fr.series.observations(series_id_defs.AUSTRALIA["cpi_id"], params=params) elif country == "Japan": indicator_data = fr.series.observations(series_id_defs.JAPAN["cpi_id"], params=params) elif country == "UK": indicator_data = fr.series.observations(series_id_defs.UK["cpi_id"], params=params) elif country == "New Zealand": indicator_data = fr.series.observations(series_id_defs.NEW_ZEALAND["cpi_id"], params=params) else: raise Exception("No cpi data for", country) cpi = [] new_observations = indicator_data["observations"] for i in range(len(new_observations)): cpi.append({"date": new_observations[i]["date"], "value": new_observations[i]["value"]}) return cpi def generate_gdp(start_date, country): params = { "observation_start": start_date, } indicator_data = {} if country == "Switzerland": indicator_data = fr.series.observations(series_id_defs.SWITZERLAND["gdp_id"], params=params) elif country == "Euro": indicator_data = fr.series.observations(series_id_defs.EURO["gdp_id"], params=params) elif country == "Canada": indicator_data = fr.series.observations(series_id_defs.CANADA["gdp_id"], params=params) elif country == "USA": indicator_data = fr.series.observations(series_id_defs.USA["gdp_id"], params=params) elif country == "Australia": indicator_data = fr.series.observations(series_id_defs.AUSTRALIA["gdp_id"], params=params) elif country == "Japan": indicator_data = 
fr.series.observations(series_id_defs.JAPAN["gdp_id"], params=params) elif country == "UK": indicator_data = fr.series.observations(series_id_defs.UK["gdp_id"], params=params) elif country == "New Zealand": indicator_data = fr.series.observations(series_id_defs.NEW_ZEALAND["gdp_id"], params=params) else: raise Exception("No gdp data for", country) gdp = [] new_observations = indicator_data["observations"] for i in range(len(new_observations)): gdp.append({"date": new_observations[i]["date"], "value": new_observations[i]["value"]}) return gdp
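# A possible consolidation of the three generators above into one lookup-driven helper
# (a sketch only, assuming the same `series_id_defs` constants and `fr` client defined
# above; `generate_indicator` and `COUNTRY_DEFS` are illustrative names, not part of the
# original module).
COUNTRY_DEFS = {
    "Switzerland": series_id_defs.SWITZERLAND,
    "Euro": series_id_defs.EURO,
    "Canada": series_id_defs.CANADA,
    "USA": series_id_defs.USA,
    "Australia": series_id_defs.AUSTRALIA,
    "Japan": series_id_defs.JAPAN,
    "UK": series_id_defs.UK,
    "New Zealand": series_id_defs.NEW_ZEALAND,
}


def generate_indicator(start_date, country, indicator_key):
    """Fetch one indicator ('interest_rate_id', 'cpi_id' or 'gdp_id') for a country."""
    params = {"observation_start": start_date}
    try:
        series_id = COUNTRY_DEFS[country][indicator_key]
    except KeyError:
        raise Exception("No data for", indicator_key, country)
    indicator_data = fr.series.observations(series_id, params=params)
    return [{"date": obs["date"], "value": obs["value"]}
            for obs in indicator_data["observations"]]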
from keras.models import Sequential from keras import layers import pandas as pd from sklearn.model_selection import train_test_split import numpy as np from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import LabelEncoder from keras.models import Sequential from keras import layers from sklearn.feature_extraction.text import CountVectorizer filepath_dict = {'yelp': 'sentiment_analysis/yelp_labelled.txt', 'amazon': 'sentiment_analysis/amazon_cells_labelled.txt', 'imdb': 'sentiment_analysis/imdb_labelled.txt'} df_list = [] for source, filepath in filepath_dict.items(): df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\t') df['source'] = source # Add another column filled with the source name df_list.append(df) df = pd.concat(df_list) print(df.iloc[0]) df_yelp = df[df['source'] == 'yelp'] sentences = df_yelp['sentence'].values y = df_yelp['label'].values sentences_train, sentences_test, y_train, y_test = train_test_split( sentences, y, test_size=0.25, random_state=1000) tokenizer = Tokenizer(num_words=5000) tokenizer.fit_on_texts(sentences_train) X_train = tokenizer.texts_to_sequences(sentences_train) X_test = tokenizer.texts_to_sequences(sentences_test) test_sent = ["This movie was nearly perfect. I only had one complaint."] test = tokenizer.texts_to_sequences(test_sent) print(test_sent) print(test) print("---------------------------") vocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index print(sentences_train[2]) print(X_train[2]) maxlen = 100 X_train = pad_sequences(X_train, padding='post', maxlen=maxlen) X_test = pad_sequences(X_test, padding='post', maxlen=maxlen) test = pad_sequences(test, padding='post', maxlen=maxlen) def create_embedding_matrix(filepath, word_index, embedding_dim): vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index embedding_matrix = np.zeros((vocab_size, embedding_dim)) with open(filepath,'r', encoding='UTF8') as f: for line in f: word, *vector = line.split() if word in word_index: idx = word_index[word] embedding_matrix[idx] = np.array( vector, dtype=np.float32)[:embedding_dim] return embedding_matrix embedding_dim = 50 embedding_matrix = create_embedding_matrix( 'glove.6B.50d.txt', tokenizer.word_index, embedding_dim) model = Sequential() model.add(layers.Embedding(vocab_size, embedding_dim, weights=[embedding_matrix], input_length=maxlen, trainable=True)) model.add(layers.Conv1D(128, 5, activation='relu')) model.add(layers.GlobalMaxPooling1D()) model.add(layers.Dense(10, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.summary() history = model.fit(X_train, y_train, epochs=10, verbose=False, validation_data=(X_test, y_test), batch_size=10) loss, accuracy = model.evaluate(X_train, y_train, verbose=False) print("Training Accuracy: {:.4f}".format(accuracy)) loss, accuracy = model.evaluate(X_test, y_test, verbose=False) print("Testing Accuracy: {:.4f}".format(accuracy)) ynew = model.predict_classes(test) print(ynew) for i in range(len(test)): print("X=%s, Predicted=%s" % (test[i], ynew[i])) ynew = model.predict_proba(test) for i in range(len(test)): print("X=%s, Predicted=%s" % (test[i], ynew[i]))
import binascii import os import warnings with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, padding from cryptography.hazmat.primitives.hmac import HMAC from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC from cryptography.hazmat.primitives.ciphers import ( Cipher, algorithms, modes ) CRYPTOGRAPHY_BACKEND = default_backend() from .exceptions import AnsibleVaultError, AnsibleVaultFormatError from .utils import to_bytes __all__ = ['CIPHERS'] def _unhexlify(b_data): try: return binascii.unhexlify(b_data) except (binascii.BinasciiError, TypeError) as exc: raise AnsibleVaultFormatError(f'Vault format unhexlify error: {exc}') def _parse_vaulttext(b_vaulttext): b_vaulttext = _unhexlify(b_vaulttext) b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b'\n', 2) b_salt = _unhexlify(b_salt) b_ciphertext = _unhexlify(b_ciphertext) return b_ciphertext, b_salt, b_crypted_hmac def parse_vaulttext(b_vaulttext): """Parse the vaulttext. Args: b_vaulttext: A byte str containing the vaulttext (ciphertext, salt, crypted_hmac). Returns: A tuple of byte str of the ciphertext suitable for passing to a Cipher class's decrypt() function, a byte str of the salt, and a byte str of the crypted_hmac. Raises: AnsibleVaultFormatError: If the vaulttext format is invalid. """ # SPLIT SALT, DIGEST, AND DATA try: return _parse_vaulttext(b_vaulttext) except AnsibleVaultFormatError: raise except Exception as exc: raise AnsibleVaultFormatError(f'Vault vaulttext format error: {exc}') class CipherAES256: """Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. Keys are derived using PBKDF2. 
""" @staticmethod def _pbkdf2_prf(p, s): return HMAC.new(p, s, SHA256).digest() @classmethod def _gen_key_initctr(cls, b_password, b_salt): # 16 for AES 128, 32 for AES256 key_length = 32 iv_length = algorithms.AES.block_size // 8 kdf = PBKDF2HMAC( algorithm=hashes.SHA256(), length=2 * key_length + iv_length, salt=b_salt, iterations=10000, backend=CRYPTOGRAPHY_BACKEND) b_derivedkey = kdf.derive(b_password) b_iv = b_derivedkey[key_length * 2:key_length * 2 + iv_length] b_key1 = b_derivedkey[:key_length] b_key2 = b_derivedkey[key_length:key_length * 2] return b_key1, b_key2, b_iv @staticmethod def _encrypt(b_plaintext, b_key1, b_key2, b_iv): cipher = Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND) encryptor = cipher.encryptor() padder = padding.PKCS7(algorithms.AES.block_size).padder() b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize()) b_ciphertext += encryptor.finalize() # COMBINE SALT, DIGEST AND DATA hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND) hmac.update(b_ciphertext) b_hmac = hmac.finalize() return (to_bytes(binascii.hexlify(b_hmac), errors='surrogateescape'), binascii.hexlify(b_ciphertext)) @classmethod def encrypt(cls, b_plaintext, secret): if secret is None: raise AnsibleVaultError('The secret passed to encrypt() was None') b_salt = os.urandom(32) b_password = secret.bytes b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt) b_hmac, b_ciphertext = cls._encrypt(b_plaintext, b_key1, b_key2, b_iv) b_vaulttext = b'\n'.join([binascii.hexlify(b_salt), b_hmac, b_ciphertext]) # Unnecessary but getting rid of it is a backwards incompatible vault # format change b_vaulttext = binascii.hexlify(b_vaulttext) return b_vaulttext @classmethod def _decrypt(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv): # b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt) # EXIT EARLY IF DIGEST DOESN'T MATCH hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND) hmac.update(b_ciphertext) try: hmac.verify(_unhexlify(b_crypted_hmac)) except InvalidSignature as exc: raise AnsibleVaultError(f'HMAC verification failed: {exc}') cipher = Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND) decryptor = cipher.decryptor() unpadder = padding.PKCS7(128).unpadder() b_plaintext = unpadder.update(decryptor.update(b_ciphertext) + decryptor.finalize()) + unpadder.finalize() return b_plaintext @classmethod def decrypt(cls, b_vaulttext, secret): b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext) b_password = secret.bytes b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt) b_plaintext = cls._decrypt(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv) return b_plaintext CIPHERS = { 'AES256': CipherAES256, }
# Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import ssl import time import traceback from datetime import datetime from nvflare.fuel.hci.cmd_arg_utils import split_to_args from nvflare.fuel.hci.conn import Connection, receive_and_process from nvflare.fuel.hci.proto import make_error from nvflare.fuel.hci.reg import CommandModule, CommandRegister from nvflare.fuel.hci.security import get_certificate_common_name from nvflare.fuel.hci.table import Table from .api_status import APIStatus class ReplyProcessor(object): """An abstract class for parsing server's response.""" def reply_start(self, client, reply_json): pass def process_string(self, client, item: str): pass def process_success(self, client, item: str): pass def process_error(self, client, err: str): pass def process_table(self, client, table: Table): pass def process_dict(self, client, d: dict): pass def process_shutdown(self, client, msg: str): pass def process_token(self, client, token: str): pass def protocol_error(self, client, err: str): pass def reply_done(self, client): pass class _LoginReplyProcessor(ReplyProcessor): """ Reply processor for handling login and setting the token for the admin client. """ def process_string(self, client, item: str): client.login_result = item def process_token(self, client, token: str): client.token = token class _CmdListReplyProcessor(ReplyProcessor): """ Reply processor to register available commands after getting back a table of commands from the server. """ def process_table(self, client, table: Table): for i in range(len(table.rows)): if i == 0: # this is header continue row = table.rows[i] if len(row) < 5: return scope = row[0] cmd_name = row[1] desc = row[2] usage = row[3] confirm = row[4] # if confirm == 'auth' and not client.require_login: # the user is not authenticated - skip this command # continue client.server_cmd_reg.add_command( scope_name=scope, cmd_name=cmd_name, desc=desc, usage=usage, handler=None, authz_func=None, visible=True, confirm=confirm, ) client.server_cmd_received = True class AdminAPI: """Underlying API to keep certs, keys and connection information and to execute admin commands through do_command. Args: host: cn provisioned for the project, with this fully qualified domain name resolving to the IP of the FL server port: port provisioned as admin_port for FL admin communication, by default provisioned as 8003, must be int ca_cert: path to CA Cert file, by default provisioned rootCA.pem client_cert: path to admin client Cert file, by default provisioned as client.crt client_key: path to admin client Key file, by default provisioned as client.key upload_dir: File transfer upload directory. Folders uploaded to the server to be deployed must be here. Folder must already exist and be accessible. download_dir: File transfer download directory. Can be same as upload_dir. Folder must already exist and be accessible. 
server_cn: server cn (only used for validating server cn) cmd_modules: command modules to load and register poc: Whether to enable poc mode for using the proof of concept example without secure communication. debug: Whether to print debug messages, which can help with diagnosing problems. False by default. """ def __init__( self, host, port, ca_cert="", client_cert="", client_key="", upload_dir="", download_dir="", server_cn=None, cmd_modules=None, poc=False, debug=False, ): if cmd_modules is None: from .file_transfer import FileTransferModule cmd_modules = [FileTransferModule(upload_dir=upload_dir, download_dir=download_dir)] self.host = host self.port = port self.poc = poc if not self.poc: if len(ca_cert) <= 0: raise Exception("missing CA Cert file name") self.ca_cert = ca_cert if len(client_cert) <= 0: raise Exception("missing Client Cert file name") self.client_cert = client_cert if len(client_key) <= 0: raise Exception("missing Client Key file name") self.client_key = client_key self.server_cn = server_cn # does not seem to be used right now self.debug = debug # for reply callbacks self.reply_processor = None self.command_result = None # for login self.token = None self.login_result = None self.server_cmd_reg = CommandRegister(app_ctx=self) self.client_cmd_reg = CommandRegister(app_ctx=self) self.server_cmd_received = False self.shutdown_msg = None self.all_cmds = [] self.iobuffer = None self._load_client_cmds(cmd_modules) if poc: print("Please log in with login_with_password(username, password) to enable server cmds.") def _load_client_cmds(self, cmd_modules): if cmd_modules: if not isinstance(cmd_modules, list): raise TypeError("cmd_modules must be a list") for m in cmd_modules: if not isinstance(m, CommandModule): raise TypeError("cmd_modules must be a list of CommandModule") self.client_cmd_reg.register_module(m, include_invisible=False) self.client_cmd_reg.finalize(self.register_command) def register_command(self, cmd_entry): self.all_cmds.append(cmd_entry.name) def logout(self): """ Send logout command to server. """ return self.server_execute("_logout") def login(self, username: str): """Login using certification files and retrieve server side commands. Args: username: Username Returns: Object containing status and details """ self.login_result = None self._try_command(f"_cert_login {username}", _LoginReplyProcessor()) if self.login_result is None: return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"} elif self.login_result == "REJECT": return {"status": APIStatus.ERROR_CERT, "details": "Incorrect user name or certificate"} # get command list from server self.server_cmd_received = False self._try_command("_commands", _CmdListReplyProcessor()) self.server_cmd_reg.finalize(self.register_command) if not self.server_cmd_received: return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"} return {"status": APIStatus.SUCCESS, "details": "Login success"} def login_with_password(self, username: str, password: str): """Login using password for poc example. 
Args: username: Username password: password Returns: Object containing status and details """ self.login_result = None self._try_command(f"_login {username} {password}", _LoginReplyProcessor()) if self.login_result is None: return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"} elif self.login_result == "REJECT": return {"status": APIStatus.ERROR_CERT, "details": "Incorrect user name or certificate"} # get command list from server self.server_cmd_received = False self._try_command("_commands", _CmdListReplyProcessor()) self.server_cmd_reg.finalize(self.register_command) if not self.server_cmd_received: return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"} return {"status": APIStatus.SUCCESS, "details": "Login success"} def _send_to_sock(self, sock, command, process_json_func): conn = Connection(sock, self) conn.append_command(command) if self.token: conn.append_token(self.token) conn.close() ok = receive_and_process(sock, process_json_func) if not ok: process_json_func( make_error("Failed to communicate with Admin Server {} on {}".format(self.host, self.port)) ) def _process_server_reply(self, resp): """Process the server reply and store the status/details into client's `command_result` Args: resp: The raw response that returns by the server. """ if self.debug: print("DEBUG: Server Reply: {}".format(resp)) self.command_result = resp # this resp is what is usually directly used to return, straight from server reply_processor = ReplyProcessor() if self.reply_processor is None else self.reply_processor reply_processor.reply_start(self, resp) if resp is not None: data = resp["data"] for item in data: it = item["type"] if it == "string": reply_processor.process_string(self, item["data"]) elif it == "success": reply_processor.process_success(self, item["data"]) elif it == "error": reply_processor.process_error(self, item["data"]) break elif it == "table": table = Table(None) table.set_rows(item["rows"]) reply_processor.process_table(self, table) elif it == "dict": reply_processor.process_dict(self, item["data"]) elif it == "token": reply_processor.process_token(self, item["data"]) elif it == "shutdown": reply_processor.process_shutdown(self, item["data"]) break else: reply_processor.protocol_error(self, "Invalid item type: " + it) break else: reply_processor.protocol_error(self, "Protocol Error") reply_processor.reply_done(self) def _try_command(self, command, reply_processor): """Try to execute a command on server side. Args: command: The command to execute. reply_processor: An instance of ReplyProcessor """ # process_json_func can't return data because how "receive_and_process" is written. 
self.reply_processor = reply_processor process_json_func = self._process_server_reply try: if not self.poc: # SSL communication ctx = ssl.create_default_context() ctx.verify_mode = ssl.CERT_REQUIRED ctx.check_hostname = False ctx.load_verify_locations(self.ca_cert) ctx.load_cert_chain(certfile=self.client_cert, keyfile=self.client_key) with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: with ctx.wrap_socket(sock) as ssock: ssock.connect((self.host, self.port)) if self.server_cn: # validate server CN cn = get_certificate_common_name(ssock.getpeercert()) if cn != self.server_cn: process_json_func( make_error("wrong server: expecting {} but connected {}".format(self.server_cn, cn)) ) return self._send_to_sock(ssock, command, process_json_func) else: # poc without certs with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: sock.connect((self.host, self.port)) self._send_to_sock(sock, command, process_json_func) except Exception as ex: if self.debug: traceback.print_exc() process_json_func( make_error("Failed to communicate with Admin Server {} on {}: {}".format(self.host, self.port, ex)) ) def do_command(self, command): """A convenient method to call commands using string. Args: command (str): command Returns: Object containing status and details (or direct response from server, which originally was just time and data) """ args = split_to_args(command) cmd_name = args[0] self.command_result = None # check client side commands entries = self.client_cmd_reg.get_command_entries(cmd_name) if len(entries) > 1: return { "status": APIStatus.ERROR_SYNTAX, "details": f"Ambiguous client command {cmd_name} - qualify with scope", } elif len(entries) == 1: ent = entries[0] ent.handler(args, self.client_cmd_reg.app_ctx) if self.command_result is None: return {"status": APIStatus.ERROR_RUNTIME, "details": "Client did not respond"} return self.command_result # check server side commands entries = self.server_cmd_reg.get_command_entries(cmd_name) if len(entries) <= 0: return { "status": APIStatus.ERROR_SYNTAX, "details": f"Command {cmd_name} not found in server or client cmds", } elif len(entries) > 1: return { "status": APIStatus.ERROR_SYNTAX, "details": f"Ambiguous server command {cmd_name} - qualify with scope", } return self.server_execute(command) def server_execute(self, command, reply_processor=None): """ This had to be kept relatively the same as in the hci Admin client with a wrapper to return, because the client is passed to command_reg commands and reusing that like file_transfer, server_execute is directly called. 
""" start = time.time() self._try_command(command, reply_processor) secs = time.time() - start usecs = int(secs * 1000000) if self.debug: print(f"DEBUG: server_execute Done [{usecs} usecs] {datetime.now()}") if self.command_result is None: return {"status": APIStatus.ERROR_RUNTIME, "details": "Server did not respond"} if "status" not in self.command_result: self.command_result.update({"status": APIStatus.SUCCESS}) return self.command_result def write_string(self, data: str): content = data + "\n" self.iobuffer.write(content) def _show_one_command(self, cmd_name, reg): entries = reg.get_command_entries(cmd_name) if len(entries) <= 0: self.write_string("Undefined command {}\n".format(cmd_name)) return for e in entries: if not e.visible: continue if len(e.scope.name) > 0: self.write_string("Command: {}.{}".format(e.scope.name, cmd_name)) else: self.write_string("Command: {}".format(cmd_name)) self.write_string("Description: {}".format(e.desc)) self.write_string("Usage: {}\n".format(e.usage)) def _show_commands(self, reg: CommandRegister): table = Table(["Scope", "Command", "Description"]) for scope_name in sorted(reg.scopes): scope = reg.scopes[scope_name] for cmd_name in sorted(scope.entries): e = scope.entries[cmd_name] if e.visible: table.add_row([scope_name, cmd_name, e.desc]) table.write(self.iobuffer)
# Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # flake8: noqa from . import semisl_cls_head from . import multi_classifier_head from . import task_incremental_classifier_head from . import non_linear_cls_head from . import cls_incremental_head from . import custom_vfnet_head from . import custom_atss_head from . import custom_retina_head from . import custom_ssd_head from . import cross_dataset_detector_head
aluno = dict()
aluno['nome'] = str(input('Nome: ')).strip().capitalize()
aluno['media'] = float(input(f'Média de {aluno["nome"]}: '))
print('-' * 20)
print(f'O nome do aluno é {aluno["nome"]}')
print(f'A média é {aluno["media"]}')
if aluno['media'] >= 7:
    print('APROVADO')
elif aluno['media'] >= 5:
    print('RECUPERAÇÃO')
else:
    print('REPROVADO')
from django import forms from crispy_forms.helper import FormHelper from crispy_forms.layout import Fieldset, Layout from drnalpha.companies.models import Company from drnalpha.regulations.models import Category, Importance, Jurisdiction, Regulation from drnalpha.sic_codes.models import Code class FinderForm(forms.Form): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper() self.helper.form_tag = False def clean(self): # Any QuerySet values from cleaned ModelMultipleChoice fields can't be stored # in the session as is. Here we call custom dump_<fieldname>() methods to # convert them into something that can, such as list of primary keys. self.dumped_data = {} if self.cleaned_data: for name, value in self.cleaned_data.items(): if hasattr(self, f"dump_{name}"): value = getattr(self, f"dump_{name}")() self.dumped_data[name] = value # class InfoForm(FinderForm, forms.ModelForm): # class Meta: # model = Company # fields = ["name", "postal_code"] # labels = { # "name": "What is your company name?", # "postcal_code": "What is your postal code?", # } # widgets = { # "name": forms.widgets.TextInput(attrs={"autocomplete": "organisation"}), # "postcal_code": forms.widgets.TextInput( # attrs={"autocomplete": "postal-code"} # ), # } class LocationsForm(FinderForm, forms.ModelForm): class Meta: model = Company fields = ["jurisdictions"] labels = { "jurisdictions": "Where does your business operate?", } widgets = { "jurisdictions": forms.widgets.CheckboxSelectMultiple, } def dump_jurisdictions(self): data = self.cleaned_data["jurisdictions"] if data: return list(data.values_list("pk", flat=True)) return [] class EmployeesForm(FinderForm, forms.ModelForm): class Meta: model = Company fields = [ "full_time_permanent_employees", "full_time_contract_employees", "part_time_permanent_employees", "part_time_contract_employees", ] labels = { "full_time_permanent_employees": "Permanent", "full_time_contract_employees": "Contract", "part_time_permanent_employees": "Permanent", "part_time_contract_employees": "Contract", } widgets = { "full_time_permanent_employees": forms.widgets.TextInput( attrs={ "pattern": "[0-9]*", "inputmode": "numeric", "class": "govuk-input govuk-input--width-4", } ), "full_time_contract_employees": forms.widgets.TextInput( attrs={ "pattern": "[0-9]*", "inputmode": "numeric", "class": "govuk-input govuk-input--width-4", } ), "part_time_permanent_employees": forms.widgets.TextInput( attrs={ "pattern": "[0-9]*", "inputmode": "numeric", "class": "govuk-input govuk-input--width-4", } ), "part_time_contract_employees": forms.widgets.TextInput( attrs={ "pattern": "[0-9]*", "inputmode": "numeric", "class": "govuk-input govuk-input--width-4", } ), } def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper.labels_small = True self.helper.layout = Layout( Fieldset( "How many people do you employ full-time?", "full_time_permanent_employees", "full_time_contract_employees", ), Fieldset( "How many people do you employ part-time?", "part_time_permanent_employees", "part_time_contract_employees", ), ) class ActivityModelMultipleChoiceField(forms.ModelMultipleChoiceField): def label_from_instance(self, obj): return obj.title class ActivitiesForm(FinderForm, forms.ModelForm): sic_codes = ActivityModelMultipleChoiceField( label="Which of these activities describes what your business does?", queryset=Code.objects.related_to_food(), required=True, widget=forms.widgets.CheckboxSelectMultiple, ) class Meta: model = Company fields = 
["sic_codes"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper.checkboxes_small = True self.helper.checkboxes_columns = True def dump_sic_codes(self): data = self.cleaned_data["sic_codes"] if data: return list(data.values_list("pk", flat=True)) return [] class ReviewForm(FinderForm): pass class CompanyForm(forms.ModelForm): class Meta: model = Company fields = [ "name", "jurisdictions", "full_time_permanent_employees", "full_time_contract_employees", "part_time_permanent_employees", "part_time_contract_employees", "sic_codes", ] @classmethod def for_finder_data(cls, data): flattened_data = {} for item in data.values(): flattened_data.update(item) return cls(data=flattened_data) class RegulationFinderForm(FinderForm, CompanyForm): def __init__(self, data, *args, **kwargs): self.queryset = Regulation.objects.all() super().__init__(data, *args, **kwargs) def get_filtered_queryset(self): return ( self.queryset.for_jurisdictions(self.cleaned_data["jurisdictions"]) .for_sic_codes(self.cleaned_data["sic_codes"]) .distinct() ) class RegulationFilterForm(FinderForm): FILTERS = { "importance": "importance__in", "jurisdiction": "jurisdictions__in", "category": "categories__in", } importance = forms.TypedMultipleChoiceField( choices=[], coerce=int, required=False, widget=forms.widgets.CheckboxSelectMultiple, ) jurisdiction = forms.ModelMultipleChoiceField( label="Location", queryset=None, required=False, to_field_name="slug", widget=forms.widgets.CheckboxSelectMultiple, ) category = forms.ModelMultipleChoiceField( queryset=None, required=False, to_field_name="slug", widget=forms.widgets.CheckboxSelectMultiple, ) def __init__(self, data, *, queryset): self.queryset = queryset super().__init__(data) self.helper.checkboxes_small = True self.helper.hide_label = True # custom setting self.fields["importance"].choices = self.get_importance_choices() self.fields["jurisdiction"].queryset = self.get_jurisdiction_queryset() self.fields["category"].queryset = self.get_category_queryset() def get_importance_choices(self): values = ( self.queryset.values_list("importance", flat=True) .distinct() .order_by("importance") ) return [(value, Importance(value).label) for value in values] def get_jurisdiction_queryset(self): return ( Jurisdiction.objects.filter(regulations__in=list(self.queryset)) .distinct() .order_by("name") ) def get_category_queryset(self): return ( Category.objects.filter(regulations__in=list(self.queryset)) .distinct() .order_by("name") ) def get_filtered_queryset(self): filters = {} for key in self.data.keys(): lookup = self.FILTERS.get(key) value = self.cleaned_data.get(key) if lookup and value: filters[lookup] = value return self.queryset.filter(**filters).distinct()
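# --- Hedged usage sketch (appended for illustration; not part of the forms module above) ---
# Why clean() builds self.dumped_data: QuerySet values from ModelMultipleChoice
# fields cannot be stored in the session as-is, so the dump_<fieldname>() hooks
# flatten them to primary-key lists. The "finder"/"locations" session keys below are
# illustrative assumptions about the surrounding wizard, not code from this module.
def _store_locations_step(request, data):
    form = LocationsForm(data=data)
    if form.is_valid():
        # dumped_data mirrors cleaned_data, with jurisdictions reduced to a pk list
        # (see dump_jurisdictions above), so it is safe to keep in the session.
        request.session.setdefault("finder", {})["locations"] = form.dumped_data
        request.session.modified = True
    return form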
### Setup # Import libraries from sklearn.metrics import pairwise_distances import numpy as np import matplotlib.pyplot as plt import matplotlib.style import matplotlib as mpl import os from random import shuffle from statistics import mean from scipy import stats import pickle, argparse, json from ProgressBar import progress with open('Hyper_Parameters.json', 'r') as f: hp_Dict = json.load(f) #For sorting sort_List_Dict = { 'PSI': ['p','t','k','f','θ','s','ʃ','b','d','g','v','ð','z','m','n','ŋ','l','r','w','u','j','i','ɪ','eɪ','ə','ʊ','ɔ','a','aɪ','oʊ','aʊ','æ','ɛ'], 'FSI': ['OBSTRUENT', 'LABIAL', 'CORONAL', 'DORSAL', 'PLOSIVE', 'FRICATIVE', 'NASAL', 'HIGH', 'FRONT', 'LOW', 'BACK','VOICED', 'SYLLABIC','SONORANT'] } class RSA_Analyzer: def __init__(self, data_Path, export_Path, data_Type): if not data_Type.upper() in ['PSI', 'FSI']: raise ValueError('Data type must be \'PSI\' or \'FSI\'.') self.export_Path = export_Path self.data_Type = data_Type self.mertic_Type_List = ['euclidean', 'correlation', 'cosine'] self.Mesgarani_Distance_Load() self.Data_Generate(data_Path) def Mesgarani_Distance_Load(self): with open('Mesgarani_Distance.pickle', 'rb') as f: self.mestarani_Distance_Dict = pickle.load(f)[self.data_Type] def Data_Generate(self, data_Path): data_Dict = {} with open(data_Path, 'rb') as f: for line in f.readlines()[1:]: raw_Data = line.decode("utf-8").strip().split('\t') data_Dict[raw_Data[0]] = np.array([float(x) for x in raw_Data[1:]]) self.data_Array = np.vstack([data_Dict[x] for x in sort_List_Dict[self.data_Type]]) #Phoneme feature when data type is PSI if self.data_Type == 'PSI': phoneme_Feature_Dict = {} with open(hp_Dict['Hidden_Analysis']['Phoneme_Feature'], 'rb') as f: for line in f.readlines()[1:]: raw_Data = line.decode("utf-8").strip().split('\t') phoneme_Feature_Dict[raw_Data[1]] = np.array([float(x) for x in raw_Data[3:]]) self.phoneme_Feature_Array = np.vstack([phoneme_Feature_Dict[x] for x in sort_List_Dict[self.data_Type]]) def RSA_Generate(self, permutation_Nums= 100000): os.makedirs(self.export_Path, exist_ok= True) rsa_Dict = {} permutation_Cor_List_Dict = {} rsa_Dict['EARShot', 'Mesgarani']= { metric_Type: self.RSA_Calc(self.data_Array, self.mestarani_Distance_Dict[metric_Type], metric_Type, False) for metric_Type in self.mertic_Type_List } permutation_Cor_List_Dict['EARShot', 'Mesgarani'] = {} for metric_Type in self.mertic_Type_List: permutation_Cor_List_Dict['EARShot', 'Mesgarani'][metric_Type] = [] for index in range(permutation_Nums): permutation_Cor_List_Dict['EARShot', 'Mesgarani'][metric_Type].append( self.RSA_Calc(self.data_Array, self.mestarani_Distance_Dict[metric_Type], metric_Type, True)[1] ) progress(index + 1, permutation_Nums, status= '{} EARShot-Mesgarani RSA based on {}'.format(self.data_Type, metric_Type)) print() if self.data_Type == 'PSI': phoneme_Feature_Distance_Dict = { metric_Type: pairwise_distances(self.phoneme_Feature_Array.astype(np.float64), metric = metric_Type) for metric_Type in self.mertic_Type_List } rsa_Dict['EARShot', 'Phoneme_Feature']= { metric_Type: self.RSA_Calc(self.data_Array, phoneme_Feature_Distance_Dict[metric_Type], metric_Type, False) for metric_Type in self.mertic_Type_List } permutation_Cor_List_Dict['EARShot', 'Phoneme_Feature'] = {} for metric_Type in self.mertic_Type_List: permutation_Cor_List_Dict['EARShot', 'Phoneme_Feature'][metric_Type] = [] for index in range(permutation_Nums): permutation_Cor_List_Dict['EARShot', 'Phoneme_Feature'][metric_Type].append( self.RSA_Calc(self.data_Array, 
phoneme_Feature_Distance_Dict[metric_Type], metric_Type, True)[1] ) progress(index + 1, permutation_Nums, status= '{} EARShot-P&F RSA based on {}'.format(self.data_Type, metric_Type)) print() rsa_Dict['Phoneme_Feature', 'Mesgarani']= { metric_Type: self.RSA_Calc(self.phoneme_Feature_Array, self.mestarani_Distance_Dict[metric_Type], metric_Type, False) for metric_Type in self.mertic_Type_List } permutation_Cor_List_Dict['Phoneme_Feature', 'Mesgarani'] = {} for metric_Type in self.mertic_Type_List: permutation_Cor_List_Dict['Phoneme_Feature', 'Mesgarani'][metric_Type] = [] for index in range(permutation_Nums): permutation_Cor_List_Dict['Phoneme_Feature', 'Mesgarani'][metric_Type].append( self.RSA_Calc(self.phoneme_Feature_Array, self.mestarani_Distance_Dict[metric_Type], metric_Type, True)[1] ) progress(index + 1, permutation_Nums, status= '{} P&F-Mesgarani RSA based on {}'.format(self.data_Type, metric_Type)) print() for data_Label, base_Label in [('EARShot', 'Mesgarani')] + ([('EARShot', 'Phoneme_Feature'), ('Phoneme_Feature', 'Mesgarani')] if self.data_Type == 'PSI' else []): for metric_Type in self.mertic_Type_List: p_Value = (1 - len(np.less(permutation_Cor_List_Dict[data_Label, base_Label][metric_Type], rsa_Dict[data_Label, base_Label][metric_Type][1])) / len(permutation_Cor_List_Dict[data_Label, base_Label][metric_Type])) fig = self.Plot_RDM( dm= rsa_Dict[data_Label, base_Label][metric_Type][0], label_List= sort_List_Dict[self.data_Type], metric= metric_Type, fig_title = '{0} {1} DSM: \n {2} cor: {3:.03f} \n Permutation cor: {4:.03f} \n Permutation test: p = {5:.03f}'.format( data_Label, self.data_Type, base_Label, rsa_Dict[data_Label, base_Label][metric_Type][1], np.mean(permutation_Cor_List_Dict[data_Label, base_Label][metric_Type]), p_Value ) ) fig.savefig(os.path.join(self.export_Path, 'RSA.{}_to_{}.{}.{}.png'.format(data_Label, base_Label, self.data_Type, metric_Type)), dpi = 300) plt.close() extract_List = [','.join(self.mertic_Type_List)] extract_List.append(','.join(['{}'.format(rsa_Dict[data_Label, base_Label][metric_Type][1]) for metric_Type in self.mertic_Type_List])) with open(os.path.join(self.export_Path, 'RSA.{}_to_{}.{}.Actual.csv'.format(data_Label, base_Label, self.data_Type)), 'w') as f: f.write('\n'.join(extract_List)) extract_List = [','.join(self.mertic_Type_List)] extract_List.extend([ ','.join(['{}'.format(x) for x in permutation_List]) for permutation_List in zip(*[ permutation_Cor_List_Dict[data_Label, base_Label][metric_Type] for metric_Type in self.mertic_Type_List ]) ]) with open(os.path.join(self.export_Path, 'RSA.{}_to_{}.{}.Shuffle.csv'.format(data_Label, base_Label, self.data_Type)), 'w') as f: f.write('\n'.join(extract_List)) def Distance_Tri_Calc(self, array): # When we compute correlations, we only consider the off-diagonal # elements that are in the lower triangle doing the upper triangle # would have gotten the same results distance_Tri = [] for index in range(array.shape[0]): distance_Tri.extend(array[index, :index]) return distance_Tri def RSA_Calc(self, data_Array, base_Distance, metric_Type, permutation_Test= False): if permutation_Test: shuffled_Index = list(range(data_Array.shape[0])) shuffle(shuffled_Index) data_Array = data_Array[shuffled_Index] data_Distance = pairwise_distances(data_Array.astype(np.float64), metric = metric_Type) data_Distance_Tri = self.Distance_Tri_Calc(data_Distance) base_Distance_Tri = self.Distance_Tri_Calc(base_Distance) return data_Distance, np.corrcoef(data_Distance_Tri, base_Distance_Tri)[0,1] def Plot_RDM(self, 
dm, label_List, metric= '', fig_title= ''): label_List = ['{} '.format(x) for x in label_List] #Spacing fig, (dm_ax) = plt.subplots(nrows = 1, ncols = 1, constrained_layout = True) fig.suptitle(fig_title) dm_ax.set_title('RDM: {}'.format(metric)) dm_fig = dm_ax.imshow(dm) plt.xticks(range(len(label_List)), label_List, fontsize=6.5, rotation = 90) plt.yticks(range(len(label_List)), label_List, fontsize=6.5) fig.colorbar(dm_fig, ax = dm_ax) return fig if __name__ == '__main__': argParser = argparse.ArgumentParser() argParser.add_argument("-d", "--directory", required= True, type= str) # argParser.add_argument("-e", "--epoch", required= True, type= int) argParser.add_argument("-c", "--criterion", required= True, type= float) argParser.add_argument("-pn", "--permutation_nums", default= 100000, type= int) argument_Dict = vars(argParser.parse_args()) # selected_Epoch = int(argument_Dict["epoch"]) selected_Criterion = float(argument_Dict["criterion"]) permutation_Nums = int(argument_Dict['permutation_nums'] or 100000) for data_Type in ['PSI', 'FSI']: work_Dir = os.path.join(hp_Dict['Result_Path'], argument_Dict["directory"], 'Hidden', 'Map', data_Type).replace('\\', '/') data_Path = os.path.join(work_Dir, 'TXT', '{}.C_{:.2f}.I_All.txt'.format(data_Type, selected_Criterion)).replace('\\', '/') export_Path = os.path.join(work_Dir, 'RSA') new_Analyzer = RSA_Analyzer( data_Path = data_Path, export_Path = export_Path, data_Type = data_Type ) new_Analyzer.RSA_Generate(permutation_Nums)
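# --- Hedged sketch (appended for illustration; not part of the script above) ---
# The core of the RSA computation above, restated standalone: correlate the
# off-diagonal lower triangles of the data's pairwise-distance matrix and a base
# distance matrix, and estimate a one-sided p-value by shuffling the rows of the
# data representation. This is a self-contained sketch of the technique, not the
# script's own API.
import numpy as np
from sklearn.metrics import pairwise_distances


def rsa_correlation(data_array, base_distance, metric='correlation'):
    data_distance = pairwise_distances(data_array.astype(np.float64), metric=metric)
    tri = np.tril_indices(data_distance.shape[0], k=-1)  # off-diagonal lower triangle
    return np.corrcoef(data_distance[tri], base_distance[tri])[0, 1]


def rsa_permutation_test(data_array, base_distance, metric='correlation', n_perm=1000, seed=0):
    rng = np.random.default_rng(seed)
    observed = rsa_correlation(data_array, base_distance, metric)
    null = np.array([
        rsa_correlation(data_array[rng.permutation(data_array.shape[0])], base_distance, metric)
        for _ in range(n_perm)
    ])
    # p-value: fraction of shuffled correlations at least as large as the observed one
    return observed, float(np.mean(null >= observed))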
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the MIT license. # See the LICENSE file in the project root for more information. from re import compile as compile_regexp, IGNORECASE from typing import Iterable, List, Mapping, Optional, Sequence from ..commonlib.bench_file import BenchFile, BenchFileAndPath from ..commonlib.collection_util import ( cat_unique, combine_mappings, empty_mapping, empty_sequence, is_empty, ) from ..commonlib.option import non_null from ..commonlib.type_utils import T from ..commonlib.util import did_you_mean, get_or_did_you_mean, try_remove_str_start from .join_analysis import ( JOIN_PER_HEAP_METRICS_ALIASES, JOIN_PER_GC_METRICS_ALIASES, JOIN_RUN_METRICS_ALIASES, ) from .run_metrics import TEST_STATUS_METRICS from .single_gc_metrics import ( GC_HEAP_COMPACT_REASON_METRICS, GC_HEAP_EXPAND_REASON_METRICS, GC_MECHANISM_METRICS, GC_REASON_METRICS, GC_TYPE_METRICS, ) from .single_heap_metrics import MARK_ROOT_PROMOTED_METRICS, MARK_ROOT_TIME_METRICS from .types import ( NAME_TO_RUN_METRIC, NAME_TO_SINGLE_GC_METRIC, NAME_TO_SINGLE_HEAP_METRIC, RunMetric, RunMetrics, ScoreRunMetric, SingleGCMetric, SingleGCMetrics, SingleHeapMetrics, run_metric_must_exist_for_name, ) def get_parsed_and_score_metrics( bench: Optional[BenchFileAndPath], metrics: Optional[Sequence[str]], default_to_important: bool ) -> RunMetrics: score_metrics = empty_sequence() if bench is None else tuple(get_score_metrics(bench.content)) if metrics is None and not is_empty(score_metrics): return score_metrics return cat_unique(parse_run_metrics_arg(metrics, default_to_important), score_metrics) _SINGLE_GC_REASONS_METRICS: Mapping[str, Sequence[str]] = { "mechanisms": [m.name for m in (*GC_TYPE_METRICS, *GC_MECHANISM_METRICS)], "reasons": [m.name for m in GC_REASON_METRICS], "compact-reasons": [m.name for m in GC_HEAP_COMPACT_REASON_METRICS], "expand-reasons": [m.name for m in GC_HEAP_EXPAND_REASON_METRICS], } _RUN_REASONS_METRICS: Mapping[str, Sequence[str]] = { k: [f"Pct{v}" for v in vs] for k, vs in _SINGLE_GC_REASONS_METRICS.items() } def parse_run_metrics_arg( metrics: Optional[Sequence[str]], default_to_important: bool = False ) -> RunMetrics: return _parse_metrics_arg( names=metrics, name_to_metric=NAME_TO_RUN_METRIC, metric_type_name="run", important=_IMPORTANT_RUN_METRICS, special=combine_mappings( _RUN_REASONS_METRICS, JOIN_RUN_METRICS_ALIASES, { "markTime": [ m for metric in MARK_ROOT_TIME_METRICS for m in (f"{metric.name}_Max_Mean", f"{metric.name}_Mean_Mean") ] }, {"just-test-status": [m.name for m in TEST_STATUS_METRICS]}, ), default_to_important=default_to_important, ) def parse_run_metric_arg(metric: str) -> RunMetric: return _parse_metric_arg(metric, NAME_TO_RUN_METRIC, "run metric") _IMPORTANT_RUN_METRICS: Sequence[str] = ( "TotalNumberGCs", "CountUsesLOHCompaction", "CountIsGen0", "CountIsGen1", "CountIsBackground", "CountIsBlockingGen2", "PctIsEphemeral", "FirstToLastGCSeconds", "NumHeaps", "HeapSizeBeforeMB_Max", "HeapSizeAfterMB_Max", "HeapSizeBeforeMB_Mean", "HeapSizeAfterMB_Mean", "FirstToLastEventSeconds", "FirstEventToFirstGCSeconds", "PctTimeInGC_WhereIsNonBackground", "PctTimePausedInGC", "PctUsesCompaction", "PauseDurationSeconds_SumWhereIsGen1", "PauseDurationSeconds_SumWhereIsBackground", "PauseDurationSeconds_SumWhereIsBlockingGen2", "PauseDurationSeconds_SumWhereUsesLOHCompaction", "TotalAllocatedMB", "TotalLOHAllocatedMB", "PauseDurationMSec_Mean", "PauseDurationMSec_MeanWhereIsEphemeral", 
"PauseDurationMSec_95PWhereIsGen0", "PauseDurationMSec_95PWhereIsGen1", "PauseDurationMSec_95PWhereIsBackground", "PauseDurationMSec_95PWhereIsBlockingGen2", "PauseDurationSeconds_Sum", "PauseDurationSeconds_SumWhereIsNonBackground", "PctReductionInHeapSize_Mean", "PromotedMB_MeanWhereIsBlockingGen2", "PromotedMB_MeanWhereIsGen0", "PromotedMB_MeanWhereIsGen1", ) def get_score_metrics(bench: BenchFile) -> Iterable[RunMetric]: if bench.scores is None: pass else: for k, v in bench.scores.items(): for name, _ in v.items(): yield run_metric_must_exist_for_name(name) yield ScoreRunMetric(k, v) def parse_single_gc_metrics_arg( metrics: Optional[Sequence[str]], default_to_important: bool = False ) -> SingleGCMetrics: return _parse_metrics_arg( names=metrics, name_to_metric=NAME_TO_SINGLE_GC_METRIC, metric_type_name="single-gc", important=_SINGLE_GC_IMPORTANT_METRICS, special=combine_mappings( _SINGLE_GC_REASONS_METRICS, { "markTime": [ m for metric in MARK_ROOT_TIME_METRICS for m in (f"{metric.name}_Max", f"{metric.name}_Mean") ], "markPromoted": [ m for metric in MARK_ROOT_PROMOTED_METRICS for m in (f"{metric.name}_Max", f"{metric.name}_Mean") ], }, JOIN_PER_GC_METRICS_ALIASES, ), default_to_important=default_to_important, ) _SINGLE_GC_IMPORTANT_METRICS: Sequence[str] = ( "Generation", "IsConcurrent", "AllocRateMBSec", "AllocedSinceLastGCMB", "PauseDurationMSec", "PromotedMB", "HeapSizeBeforeMB", "HeapSizeAfterMB", # "PctReductionInHeapSize", "UsesCompaction", ) def parse_single_gc_metric_arg(metric: str) -> SingleGCMetric: return _parse_metric_arg(metric, NAME_TO_SINGLE_GC_METRIC, "single-gc metric") def parse_single_heap_metrics_arg( metrics: Optional[Sequence[str]], default_to_important: bool = False ) -> SingleHeapMetrics: return _parse_metrics_arg( names=metrics, name_to_metric=NAME_TO_SINGLE_HEAP_METRIC, metric_type_name="single-heap", important=None, special=JOIN_PER_HEAP_METRICS_ALIASES, default_to_important=default_to_important, ) def _parse_metrics_arg( names: Optional[Sequence[str]], name_to_metric: Mapping[str, T], metric_type_name: str, important: Optional[Sequence[str]] = None, special: Mapping[str, Sequence[str]] = empty_mapping(), default_to_important: bool = False, ) -> Sequence[T]: kind_of_metric = f"{metric_type_name} metric" def all_important() -> Iterable[T]: for name in non_null(important): try: yield name_to_metric[name] except KeyError: raise Exception(did_you_mean(name_to_metric, name, name=kind_of_metric)) from None def metrics() -> Iterable[T]: if names is None and not default_to_important: pass elif names is None: yield from all_important() elif len(names) == 1 and names[0] == "all": yield from name_to_metric.values() elif len(names) == 1 and names[0] == "none": pass else: for m in names: if m == "important": yield from all_important() elif m in special: for s in special[m]: yield get_or_did_you_mean(name_to_metric, s, kind_of_metric) elif m in name_to_metric: yield name_to_metric[m] else: rgx_str = try_remove_str_start(m, "rgx:") assert rgx_str is not None, did_you_mean(name_to_metric, m, kind_of_metric) # Try using as a regexp rgx = compile_regexp(rgx_str, IGNORECASE) metrics = [ metric for name, metric in name_to_metric.items() if rgx.search(name) is not None ] assert not is_empty(metrics), did_you_mean(name_to_metric, m, kind_of_metric) yield from metrics res: List[T] = [] for met in metrics(): assert met not in res, f"Duplicate metric {met}" res.append(met) return res def _parse_metric_arg(name: str, name_to_metric: Mapping[str, T], metric_type_name: str) -> T: 
try: return name_to_metric[name] except KeyError: raise Exception(did_you_mean(name_to_metric, name, metric_type_name)) from None
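# --- Hedged sketch (appended for illustration; not part of the module above) ---
# The "rgx:" convention accepted by _parse_metrics_arg above, restated standalone:
# a metric argument is either a literal metric name or "rgx:<pattern>", which selects
# every known metric whose name matches the pattern case-insensitively. The toy
# name_to_metric mapping in the trailing comment is illustrative, not real metric objects.
from re import IGNORECASE, compile as compile_regexp


def select_metrics(arg, name_to_metric):
    if arg in name_to_metric:
        return [name_to_metric[arg]]
    if arg.startswith("rgx:"):
        rgx = compile_regexp(arg[len("rgx:"):], IGNORECASE)
        return [metric for name, metric in name_to_metric.items() if rgx.search(name)]
    raise KeyError(f"Unknown metric argument: {arg}")


# e.g. select_metrics("rgx:pauseduration", {"PauseDurationMSec_Mean": 1, "TotalNumberGCs": 2}) == [1]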
from bottle import request, response from . import auth from . import family from . import genus from . import taxon from . import accession from . import source from . import plant from . import location from . import report from . import organization from . import search from . import schema from . import user from . import invitation from bauble import app, API_ROOT @app.hook('after_request') def after_request_hook(*args): # this can be used for debugging but any other request hooks should be setup in bauble.plugins pass @app.hook('before_request') def before_request_hook(): # this can be used for debugging but any other request hooks should be setup in bauble.plugins pass def set_cors_headers(): """ You need to add some headers to each request. Don't use the wildcard '*' for Access-Control-Allow-Origin in production. """ response.set_header('Access-Control-Allow-Origin', request.headers.get("Origin")) response.set_header('Access-Control-Allow-Methods', 'PUT, GET, POST, DELETE, OPTIONS, PATCH') response.set_header('Access-Control-Allow-Headers', 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token, Authorization')
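# --- Hedged wiring sketch (appended for illustration; not part of the module above) ---
# set_cors_headers() is defined above but not attached to anything in this file; one
# plausible way to apply it is an 'after_request' hook plus a catch-all OPTIONS route
# for CORS preflight requests. This is an assumption about intent, not how bauble
# actually registers it elsewhere.
@app.hook('after_request')
def apply_cors_headers():
    set_cors_headers()


@app.route('/', method='OPTIONS')
@app.route('/<any_path:path>', method='OPTIONS')
def cors_preflight(any_path=None):
    # Preflight requests only need the CORS headers and an empty body.
    set_cors_headers()
    return ''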
import CAT.dataset import CAT.model import CAT.strategy
from django import template register = template.Library() @register.inclusion_tag('ajax/partials/render_ajax.html') def get_ajax_object(strategy, id): return { 'strategy': strategy, 'id': id, }
from __future__ import division import os import torch import argparse import numpy as np from mmcv import Config from mmcv.runner import load_checkpoint from mmcv.parallel import MMDataParallel from utils import (create_logger, set_random_seed, rm_suffix, mkdir_if_no_exists) from dsgcn.datasets import build_dataset, build_dataloader, build_processor from dsgcn.models import build_model def parse_args(): parser = argparse.ArgumentParser(description='Test Cluster Detection') parser.add_argument('--config', help='train config file path') parser.add_argument('--seed', type=int, default=42, help='random seed') parser.add_argument('--processor_type', choices=['det', 'seg'], default='det') parser.add_argument('--work_dir', help='the dir to save logs and models') parser.add_argument('--load_from', help='the checkpoint file to load from') parser.add_argument('--gpus', type=int, default=1, help='number of gpus(only applicable to non-distributed training)') parser.add_argument('--save_output', action='store_true', default=False) parser.add_argument('--no_cuda', action='store_true', default=False) args = parser.parse_args() return args def test_cluster_det(model, dataset, processor, cfg, logger=None): if cfg.load_from: load_checkpoint(model, cfg.load_from) mseloss = torch.nn.MSELoss() losses = [] output_probs = [] if cfg.gpus == 1: data_loader = build_dataloader( dataset, processor, cfg.batch_size_per_gpu, cfg.workers_per_gpu, cfg.gpus, train=False) model = MMDataParallel(model, device_ids=range(cfg.gpus)) if cfg.cuda: model.cuda() model.eval() for i, (x, adj, label) in enumerate(data_loader): with torch.no_grad(): if cfg.cuda: label = label.cuda(non_blocking=True) output = model(x, adj).view(-1) loss = mseloss(output, label).item() losses += [loss] if i % cfg.print_freq == 0: logger.info('[Test] Iter {}/{}: Loss {:.4f}'.format(i, len(data_loader), loss)) if cfg.save_output: prob = output.data.cpu().numpy() output_probs.append(prob) else: raise NotImplementedError avg_loss = sum(losses) / len(losses) logger.info('[Test] Overall Loss {:.4f}'.format(avg_loss)) if cfg.save_output: fn = os.path.basename(cfg.load_from) opath = os.path.join(cfg.work_dir, fn[:fn.rfind('.pth.tar')] + '.npz') meta = { 'tot_inst_num': len(dataset.idx2lb), 'proposal_folders': cfg.test_data.proposal_folders, } print('dump output to {}'.format(opath)) np.savez_compressed(opath, data=output_probs, meta=meta) def main(): args = parse_args() cfg = Config.fromfile(args.config) # set cuda cfg.cuda = not args.no_cuda and torch.cuda.is_available() # set cudnn_benchmark & cudnn_deterministic if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True if cfg.get('cudnn_deterministic', False): torch.backends.cudnn.deterministic = True # update configs according to args if not hasattr(cfg, 'work_dir'): if args.work_dir is not None: cfg.work_dir = args.work_dir else: cfg_name = rm_suffix(os.path.basename(args.config)) cfg.work_dir = os.path.join('./data/work_dir', cfg_name) mkdir_if_no_exists(cfg.work_dir, is_folder=True) if not hasattr(cfg, 'processor_type'): cfg.processor_type = args.processor_type if args.load_from is not None: cfg.load_from = args.load_from cfg.gpus = args.gpus cfg.save_output = args.save_output logger = create_logger() # set random seeds if args.seed is not None: logger.info('Set random seed to {}'.format(args.seed)) set_random_seed(args.seed) model = build_model(cfg.model['type'], **cfg.model['kwargs']) for k, v in cfg.model['kwargs'].items(): setattr(cfg.test_data, k, v) test_dataset = 
build_dataset(cfg.test_data) test_processor = build_processor(cfg.processor_type) test_cluster_det(model, test_dataset, test_processor, cfg, logger) if __name__ == '__main__': main()
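# --- Hedged usage note (appended for illustration; not part of the script above) ---
# Typical invocation, assuming this file is saved as test_cluster_det.py and that the
# config and checkpoint paths below exist (both are placeholders):
#   python test_cluster_det.py --config cfgs/cfg_test_det.py \
#       --load_from work_dir/latest.pth.tar --save_output
# --save_output writes the per-proposal probabilities to an .npz file under
# cfg.work_dir, as implemented in test_cluster_det() above.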
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import wx from cairis.core.armid import * from ReferencePanel import ReferencePanel import DialogClassParameters __author__ = 'Shamal Faily' class ReferenceDialog(wx.Dialog): def __init__(self,parent,crTypeName,refName = '',desc = '',dimName = ''): wx.Dialog.__init__(self,parent,CHARACTERISTICREFERENCE_ID,'Add Characteristic Reference',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,300)) self.theCharacteristicReferenceType = crTypeName self.theReferenceName = refName self.theDescription = desc self.theDimensionName = dimName self.commitVerb = 'Add' if refName != '': self.commitVerb = 'Edit' self.SetTitle('Edit Characteristic Reference') mainSizer = wx.BoxSizer(wx.VERTICAL) self.panel = ReferencePanel(self,self.theReferenceName,self.theDescription, self.theDimensionName) mainSizer.Add(self.panel,1,wx.EXPAND) self.SetSizer(mainSizer) wx.EVT_BUTTON(self,CHARACTERISTICREFERENCE_BUTTONCOMMIT_ID,self.onCommit) def onCommit(self,evt): commitLabel = self.commitVerb + ' Characteristic Reference' refCtrl = self.FindWindowById(CHARACTERISTICREFERENCE_COMBOREFERENCE_ID) descCtrl = self.FindWindowById(CHARACTERISTICREFERENCE_TEXTDESCRIPTION_ID) dimCtrl = self.FindWindowById(CHARACTERISTICREFERENCE_COMBODIMENSION_ID) self.theReferenceName = refCtrl.GetValue() self.theDescription = descCtrl.GetValue() self.theDimensionName = dimCtrl.GetValue() if len(self.theReferenceName) == 0: dlg = wx.MessageDialog(self,'Reference name cannot be empty',commitLabel,wx.OK) dlg.ShowModal() dlg.Destroy() return if len(self.theDimensionName) == 0: dlg = wx.MessageDialog(self,'Dimension name cannot be empty',commitLabel,wx.OK) dlg.ShowModal() dlg.Destroy() return if len(self.theDescription) == 0: dlg = wx.MessageDialog(self,'Description cannot be empty',commitLabel,wx.OK) dlg.ShowModal() dlg.Destroy() return else: self.EndModal(CHARACTERISTICREFERENCE_BUTTONCOMMIT_ID) def reference(self): return self.theReferenceName def dimension(self): return self.theDimensionName def description(self): return self.theDescription
# coding: utf-8 from __future__ import unicode_literals import codecs import csv import importlib import os import re import lxml.html import requests from invoke import task from opencivicdata.divisions import Division from six import next, StringIO, text_type from six.moves.urllib.parse import urlsplit from unidecode import unidecode # Map Standard Geographical Classification codes to the OCD identifiers of provinces and territories. province_and_territory_codes_memo = {} # Map OpenCivicData Division Identifier to Census type name. ocdid_to_type_name_map = {} def province_and_territory_codes(): if not province_and_territory_codes_memo: for division in Division.all('ca'): if division._type in ('province', 'territory'): province_and_territory_codes_memo[division.attrs['sgc']] = division.id return province_and_territory_codes_memo def csv_reader(url): """ Reads a remote CSV file. """ return csv.reader(StringIO(requests.get(url).text)) def slug(name): return unidecode(text_type(name).lower().translate({ ord(' '): '_', ord("'"): '_', ord('-'): '_', # dash ord('—'): '_', # m-dash ord('–'): '_', # n-dash ord('.'): None, })) def get_definition(division_id, aggregation=False): if not ocdid_to_type_name_map: # Map census division type codes to names. census_division_type_names = {} document = lxml.html.fromstring(requests.get('https://www12.statcan.gc.ca/census-recensement/2011/ref/dict/table-tableau/table-tableau-4-eng.cfm').content) for abbr in document.xpath('//table/tbody/tr/th[1]/abbr'): census_division_type_names[abbr.text_content()] = re.sub(' /.+\Z', '', abbr.attrib['title']) # Map census subdivision type codes to names. census_subdivision_type_names = {} document = lxml.html.fromstring(requests.get('https://www12.statcan.gc.ca/census-recensement/2011/ref/dict/table-tableau/table-tableau-5-eng.cfm').content) for abbr in document.xpath('//table/tbody/tr/th[1]/abbr'): census_subdivision_type_names[abbr.text_content()] = re.sub(' /.+\Z', '', abbr.attrib['title']) # Map OCD identifiers to census types. for division in Division.all('ca'): if division._type == 'cd': ocdid_to_type_name_map[division.id] = census_division_type_names[division.attrs['classification']] elif division._type == 'csd': ocdid_to_type_name_map[division.id] = census_subdivision_type_names[division.attrs['classification']] codes = province_and_territory_codes() division = Division.get(division_id) expected = {} vowels = ('A', 'À', 'E', 'É', 'I', 'Î', 'O', 'Ô', 'U') sections = division_id.split('/') ocd_type, ocd_type_id = sections[-1].split(':') # Determine the module name, name and classification. 
if ocd_type == 'country': expected['module_name'] = 'ca' expected['name'] = 'Parliament of Canada' elif ocd_type in ('province', 'territory'): pattern = 'ca_{}_municipalities' if aggregation else 'ca_{}' expected['module_name'] = pattern.format(ocd_type_id) if aggregation: expected['name'] = '{} Municipalities'.format(division.name) elif ocd_type_id in ('nl', 'ns'): expected['name'] = '{} House of Assembly'.format(division.name) elif ocd_type_id == 'qc': expected['name'] = 'Assemblée nationale du Québec' else: expected['name'] = 'Legislative Assembly of {}'.format(division.name) elif ocd_type == 'cd': province_or_territory_type_id = codes[ocd_type_id[:2]].split(':')[-1] expected['module_name'] = 'ca_{}_{}'.format(province_or_territory_type_id, slug(division.name)) name_infix = ocdid_to_type_name_map[division_id] if name_infix == 'Regional municipality': name_infix = 'Regional' expected['name'] = '{} {} Council'.format(division.name, name_infix) elif ocd_type == 'csd': province_or_territory_type_id = codes[ocd_type_id[:2]].split(':')[-1] expected['module_name'] = 'ca_{}_{}'.format(province_or_territory_type_id, slug(division.name)) if ocd_type_id[:2] == '24': if division.name[0] in vowels: expected['name'] = "Conseil municipal d'{}".format(division.name) else: expected['name'] = "Conseil municipal de {}".format(division.name) else: name_infix = ocdid_to_type_name_map[division_id] if name_infix in ('Municipality', 'Specialized municipality'): name_infix = 'Municipal' elif name_infix == 'District municipality': name_infix = 'District' elif name_infix == 'Regional municipality': name_infix = 'Regional' expected['name'] = '{} {} Council'.format(division.name, name_infix) elif ocd_type == 'arrondissement': census_subdivision_type_id = sections[-2].split(':')[-1] province_or_territory_type_id = codes[census_subdivision_type_id[:2]].split(':')[-1] expected['module_name'] = 'ca_{}_{}_{}'.format(province_or_territory_type_id, slug(Division.get('/'.join(sections[:-1])).name), slug(division.name)) if division.name[0] in vowels: expected['name'] = "Conseil d'arrondissement d'{}".format(division.name) elif division.name[:3] == 'Le ': expected['name'] = "Conseil d'arrondissement du {}".format(division.name[3:]) else: expected['name'] = "Conseil d'arrondissement de {}".format(division.name) else: raise Exception('{}: Unrecognized OCD type {}'.format(division_id, ocd_type)) # Determine the class name. class_name_parts = re.split('[ -]', re.sub("[—–]", '-', re.sub("['.]", '', division.name))) expected['class_name'] = unidecode(text_type(''.join(word if re.match('[A-Z]', word) else word.capitalize() for word in class_name_parts))) if aggregation: expected['class_name'] += 'Municipalities' # Determine the url. expected['url'] = division.attrs['url'] # Determine the division name. expected['division_name'] = division.name return expected @task def urls(): for module_name in os.listdir('.'): if os.path.isdir(module_name) and module_name not in ('.git', '_cache', '_data', '__pycache__', 'csv', 'disabled'): module = importlib.import_module('{}.people'.format(module_name)) if module.__dict__.get('COUNCIL_PAGE'): print('{:<60} {}'.format(module_name, module.__dict__['COUNCIL_PAGE'])) else: print('{:<60} COUNCIL_PAGE not defined'.format(module_name)) @task def tidy(): # Map OCD identifiers to styles of address. 
leader_styles = {} member_styles = {} for gid in range(3): reader = csv_reader('https://docs.google.com/spreadsheets/d/11qUKd5bHeG5KIzXYERtVgs3hKcd9yuZlt-tCTLBFRpI/pub?single=true&gid={}&output=csv'.format(gid)) next(reader) for row in reader: key = row[0] leader_styles[key] = row[2] member_styles[key] = row[3] for module_name in os.listdir('.'): division_ids = set() jurisdiction_ids = set() if os.path.isdir(module_name) and module_name not in ('.git', '_cache', '_data', '__pycache__', 'csv', 'disabled') and not module_name.endswith('_candidates'): metadata = module_name_to_metadata(module_name) # Ensure division_id is unique. division_id = metadata['division_id'] if division_id in division_ids: raise Exception('{}: Duplicate division_id {}'.format(module_name, division_id)) else: division_ids.add(division_id) # Ensure jurisdiction_id is unique. jurisdiction_id = metadata['jurisdiction_id'] if jurisdiction_id in jurisdiction_ids: raise Exception('{}: Duplicate jurisdiction_id {}'.format(module_name, jurisdiction_id)) else: jurisdiction_ids.add(jurisdiction_id) expected = get_definition(division_id, bool(module_name.endswith('_municipalities'))) # Ensure presence of url and styles of address. if not member_styles.get(division_id): print('{:<60} No member style of address: {}'.format(module_name, division_id)) if not leader_styles.get(division_id): print('{:<60} No leader style of address: {}'.format(module_name, division_id)) url = metadata['url'] if url and not expected['url']: parsed = urlsplit(url) if parsed.scheme not in ('http', 'https') or parsed.path or parsed.query or parsed.fragment: print('{:<60} Check: {}'.format(module_name, url)) # Warn if the name or classification may be incorrect. name = metadata['name'] if name != expected['name']: print('{:<60} Expected {}'.format(name, expected['name'])) classification = metadata['classification'] if classification != 'legislature': print('{:<60} Expected legislature'.format(classification)) # Name the classes correctly. class_name = metadata['class_name'] if class_name != expected['class_name']: # @note This for-loop will only run if the class name in __init__.py is incorrect. for basename in os.listdir(module_name): if basename.endswith('.py'): with codecs.open(os.path.join(module_name, basename), 'r', 'utf8') as f: content = f.read() with codecs.open(os.path.join(module_name, basename), 'w', 'utf8') as f: content = content.replace(class_name + '(', expected['class_name'] + '(') f.write(content) # Set the division_name and url appropriately. division_name = metadata['division_name'] if division_name != expected['division_name'] or (expected['url'] and url != expected['url']): with codecs.open(os.path.join(module_name, '__init__.py'), 'r', 'utf8') as f: content = f.read() with codecs.open(os.path.join(module_name, '__init__.py'), 'w', 'utf8') as f: if division_name != expected['division_name']: content = content.replace('= ' + division_name, '= ' + expected['division_name']) if expected['url'] and url != expected['url']: content = content.replace(url, expected['url']) f.write(content) # Name the module correctly. 
if module_name != expected['module_name']: print('{:<60} Expected {}'.format(module_name, expected['module_name'])) @task def sources(): for module_name in os.listdir('.'): if os.path.isdir(module_name) and module_name not in ('.git', '_cache', '_data', '__pycache__', 'csv', 'disabled'): path = os.path.join(module_name, 'people.py') with codecs.open(path, 'r', 'utf-8') as f: content = f.read() if content.count('add_source') < content.count('lxmlize') - 1: # exclude the import print('Add source? {}'.format(path)) def module_name_to_metadata(module_name): """ Copied from `reports.utils`. """ module = importlib.import_module(module_name) for obj in module.__dict__.values(): division_id = getattr(obj, 'division_id', None) if division_id: return { 'class_name': obj.__name__, 'division_id': division_id, 'division_name': getattr(obj, 'division_name', None), 'name': getattr(obj, 'name', None), 'url': getattr(obj, 'url', None), 'classification': getattr(obj, 'classification', None), 'jurisdiction_id': '{}/{}'.format(division_id.replace('ocd-division', 'ocd-jurisdiction'), getattr(obj, 'classification', 'legislature')), }
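# --- Hedged illustration (appended; not part of the tasks module above) ---
# What slug() above produces for a few sample division names: lowercase, accents
# transliterated by unidecode, spaces/apostrophes/dashes mapped to underscores, and
# periods dropped. The sample names are illustrative inputs, not data read from the
# OCD division registry here.
def _slug_examples():
    assert slug("Trois-Rivières") == "trois_rivieres"
    assert slug("L'Île-Perrot") == "l_ile_perrot"
    assert slug("St. John's") == "st_john_s"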
from dataclasses import dataclass from typing import Dict, List, Union from serde import deserialize, serialize from serde.json import from_json, to_json @deserialize @serialize @dataclass class Foo: v: Union[int, str] c: Union[Dict[str, int], List[int]] def main(): f = Foo(10, [1, 2, 3]) print(f"Into Json: {to_json(f)}") s = '{"v": 10, "c": [1, 2, 3]}' print(f"From Json: {from_json(Foo, s)}") f = Foo('foo', {'bar': 1, 'baz': 2}) print(f"Into Json: {to_json(f)}") s = '{"v": "foo", "c": {"bar": 1, "baz": 2}}' print(f"From Json: {from_json(Foo, s)}") if __name__ == '__main__': main()
from datetime import datetime from fabric.api import env from fabric.colors import yellow from fabric.context_managers import cd, shell_env from fabric.operations import run from fabric.contrib.files import exists GIT_REPO = { 'url': 'https://github.com/m4droid/TransantiagoScanner.git', 'name': 'TransantiagoScanner' } DEPLOY_TIMESTAMP = datetime.now().strftime("%Y-%m-%dT%H:%M:%S") def production(): env.hosts = ['ts.m4droid.com'] env.user = 'tscanner' env.branch = 'master' env.home = '/home/{0:s}'.format(env.user) env.python_env = '{0:s}/pyenv'.format(env.home) def deploy(): prepare_environment() repo_update() repo_activate_version() npm_install() bower_install() set_config_file() grunt_build() def prepare_environment(): print(yellow('\nPreparing environment')) with shell_env(HOME=env.home), cd(env.home): run('mkdir -p repos dists') def repo_update(): print(yellow('\nUpdate repository')) with shell_env(HOME=env.home), cd('{0:s}/repos'.format(env.home)): run( '[ ! -d {name:s} ] && git clone {url:s} || (cd {name:s} && git pull)'.format(**GIT_REPO), ) def repo_activate_version(): print(yellow('\nActivating repository version')) with shell_env(HOME=env.home), cd('{0:s}/repos/{1:s}'.format(env.home, GIT_REPO['name'])): run( 'git checkout {0:s}'.format(env.branch), ) def npm_install(): print(yellow('\nInstalling NPM dependencies')) with shell_env(HOME=env.home), cd('{0:s}/repos/{1:s}'.format(env.home, GIT_REPO['name'])): run('npm install') def bower_install(): print(yellow('\nInstalling Bower dependencies')) with shell_env(HOME=env.home), cd('{0:s}/repos/{1:s}'.format(env.home, GIT_REPO['name'])): run('bower --config.interactive=false cache clean') run('bower --config.interactive=false install') def set_config_file(): with shell_env(HOME=env.home), cd('{0:s}/repos/{1:s}'.format(env.home, GIT_REPO['name'])): if not exists('app/scripts/configs/config.js'): print(yellow('\nSetting config file')) run('cp app/scripts/configs/config.js{.default,}') def grunt_build(): print(yellow('\nBuilding project')) with shell_env(HOME=env.home), cd('{0:s}/repos/{1:s}'.format(env.home, GIT_REPO['name'])): run('grunt build') run('mv dist ~/dists/{0:s}'.format(DEPLOY_TIMESTAMP)) run('rm -f ~/dists/current && ln -s ~/dists/{0:s} ~/dists/current'.format(DEPLOY_TIMESTAMP))
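# --- Hedged usage note (appended for illustration; not part of the fabfile above) ---
# With Fabric 1.x (fabric.api), the environment task is chained before the action, e.g.:
#   fab production deploy
# production() populates env (hosts, user, branch), then deploy() runs each step over
# SSH on env.hosts.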
def html_to_json(html_str):
    items = [item.strip() for item in html_str.split('</li><li>')]
    return {
        'address': {
            'state': items[2],
            'district': items[3],
            'block': items[4],
            'cluster': items[5],
            'village': items[7],
            'pincode': items[8],
        },
        'school_profile': {
            'school_name': items[0].replace('<ul><li>', ''),
            'udise_code': items[1],
            'school_category': items[9],
            'school_type': items[10],
            'class_from': int(items[11]),
            'class_to': int(items[12]),
            'state_management': items[13],
            'national_management': items[14],
            'status': items[15],
            'location': items[16],
        },
        'basic_details': {
            'aff_board_sec': items[34],
            'aff_board_hsec': items[35],
            'year_of_establishment': int(items[36]),
            'pre_primary': items[37],
        },
        'facilities': {
            'building_status': items[38],
            'boundary_wall': items[39],
            'no_of_boys_toilets': int(items[40]),
            'no_of_girls_toilets': int(items[41]),
            'no_of_cwsn_toilets': int(items[42]),
            'drinking_water_availability': items[43] == 'Yes',
            'hand_wash_facility': items[43] == 'Yes',
            'functional_generator': int(items[45]),
            'library': items[46] == '1-Yes',
            'reading_corner': items[47] == '1-Yes',
            'book_bank': items[49] == '1-Yes',
            'functional_laptop': int(items[51]),
            'functional_desktop': int(items[52]),
            'functional_tablet': int(items[53]),
            'functional_scanner': int(items[54]),
            'functional_printer': int(items[55]),
            'functional_led': int(items[56]),
            'functional_digiboard': int(items[57]),
            'internet': items[58] == '1-Yes',
            'dth': items[59] == '1-Yes',
            'functional_web_cam': int(items[60]),
        },
        'room_details': {
            'class_rooms': int(items[61]),
            'other_rooms': int(items[62]),
        },
        # Enrolment counts for items 63-75 (one value per class level)
        'enrolment_of_the_students': items[63:76],
        'total_teachers': int(items[76].split()[2]),
    }
import logging
import voluptuous as vol
import json
from datetime import timedelta
import time

from homeassistant.components import sensor
from custom_components.smartthinq import (
    DOMAIN, LGE_DEVICES, LGEDevice)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
    ATTR_ENTITY_ID, CONF_NAME, CONF_TOKEN, CONF_ENTITY_ID)

import wideq

REQUIREMENTS = ['wideq']
DEPENDENCIES = ['smartthinq']

LGE_WATERPURIFIER_DEVICES = 'lge_waterpurifier_devices'

CONF_MAC = 'mac'

ATTR_COLD_WATER_USAGE_DAY = 'cold_water_usage_day'
ATTR_NORMAL_WATER_USAGE_DAY = 'normal_water_usage_day'
ATTR_HOT_WATER_USAGE_DAY = 'hot_water_usage_day'
ATTR_TOTAL_WATER_USAGE_DAY = 'total_water_usage_day'
ATTR_COLD_WATER_USAGE_WEEK = 'cold_water_usage_week'
ATTR_NORMAL_WATER_USAGE_WEEK = 'normal_water_usage_week'
ATTR_HOT_WATER_USAGE_WEEK = 'hot_water_usage_week'
ATTR_TOTAL_WATER_USAGE_WEEK = 'total_water_usage_week'
ATTR_COLD_WATER_USAGE_MONTH = 'cold_water_usage_month'
ATTR_NORMAL_WATER_USAGE_MONTH = 'normal_water_usage_month'
ATTR_HOT_WATER_USAGE_MONTH = 'hot_water_usage_month'
ATTR_TOTAL_WATER_USAGE_MONTH = 'total_water_usage_month'
ATTR_COLD_WATER_USAGE_YEAR = 'cold_water_usage_year'
ATTR_NORMAL_WATER_USAGE_YEAR = 'normal_water_usage_year'
ATTR_HOT_WATER_USAGE_YEAR = 'hot_water_usage_year'
ATTR_TOTAL_WATER_USAGE_YEAR = 'total_water_usage_year'
ATTR_COCKCLEAN_STATE = 'cockclean_state'
ATTR_DEVICE_TYPE = 'device_type'

COCKCLEANMODES = {
    'WAITING': wideq.STATE_WATERPURIFIER_COCKCLEAN_WAIT,
    'COCKCLEANING': wideq.STATE_WATERPURIFIER_COCKCLEAN_ON,
}

MAX_RETRIES = 5

LOGGER = logging.getLogger(__name__)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the LGE WATER PURIFIER components."""
    import wideq

    refresh_token = hass.data[CONF_TOKEN]
    client = wideq.Client.from_token(refresh_token)
    name = config[CONF_NAME]

    LOGGER.debug("Creating new LGE WATER PURIFIER")
    LGE_WATERPURIFIER_DEVICES = []

    for device_id in (d for d in hass.data[LGE_DEVICES]):
        device = client.get_device(device_id)
        model = client.model_info(device)

        if device.type == wideq.DeviceType.WATER_PURIFIER:
            name = config[CONF_NAME]
            mac = device.macaddress
            model_type = model.model_type
            if mac == config[CONF_MAC]:
                waterpurifier_entity = LGEWATERPURIFIERDEVICE(client, device, name, model_type)
                LGE_WATERPURIFIER_DEVICES.append(waterpurifier_entity)

    add_entities(LGE_WATERPURIFIER_DEVICES)

    LOGGER.debug("LGE WATER PURIFIER is added")


class LGEWATERPURIFIERDEVICE(LGEDevice):

    def __init__(self, client, device, name, model_type):
        """Initialize an LGE WATER PURIFIER Device."""
        LGEDevice.__init__(self, client, device)

        import wideq
        self._wp = wideq.WPDevice(client, device)

        self._wp.monitor_start()
        self._wp.monitor_start()
        self._wp.delete_permission()
        self._wp.delete_permission()

        # The response from the monitoring query.
self._state = None self._name = name self._type = model_type self.update() @property def name(self): return self._name @property def device_type(self): return self._type @property def supported_features(self): """ none """ @property def state_attributes(self): """Return the optional state attributes.""" data={} data[ATTR_DEVICE_TYPE] = self.device_type data[ATTR_COLD_WATER_USAGE_DAY] = self.cold_water_usage_day data[ATTR_NORMAL_WATER_USAGE_DAY] = self.normal_water_usage_day data[ATTR_HOT_WATER_USAGE_DAY] = self.hot_water_usage_day data[ATTR_TOTAL_WATER_USAGE_DAY] = self.total_water_usage_day data[ATTR_COLD_WATER_USAGE_WEEK] = self.cold_water_usage_week data[ATTR_NORMAL_WATER_USAGE_WEEK] = self.normal_water_usage_week data[ATTR_HOT_WATER_USAGE_WEEK] = self.hot_water_usage_week data[ATTR_TOTAL_WATER_USAGE_WEEK] = self.total_water_usage_week data[ATTR_COLD_WATER_USAGE_MONTH] = self.cold_water_usage_month data[ATTR_NORMAL_WATER_USAGE_MONTH] = self.normal_water_usage_month data[ATTR_HOT_WATER_USAGE_MONTH] = self.hot_water_usage_month data[ATTR_TOTAL_WATER_USAGE_MONTH] = self.total_water_usage_month data[ATTR_COLD_WATER_USAGE_YEAR] = self.cold_water_usage_year data[ATTR_NORMAL_WATER_USAGE_YEAR] = self.normal_water_usage_year data[ATTR_HOT_WATER_USAGE_YEAR] = self.hot_water_usage_year data[ATTR_TOTAL_WATER_USAGE_YEAR] = self.total_water_usage_year data[ATTR_COCKCLEAN_STATE] = self.cockclean_status return data @property def cold_water_usage_day(self): data = self._wp.day_water_usage('C') usage = format((float(data) * 0.001), ".3f") return usage @property def normal_water_usage_day(self): data = self._wp.day_water_usage('N') usage = format((float(data) * 0.001), ".3f") return usage @property def hot_water_usage_day(self): data = self._wp.day_water_usage('H') usage = format((float(data) * 0.001), ".3f") return usage @property def total_water_usage_day(self): cold = self.cold_water_usage_day normal = self.normal_water_usage_day hot = self.hot_water_usage_day total = format((float(cold) + float(normal) + float(hot)), ".3f") return total @property def cold_water_usage_week(self): data = self._wp.week_water_usage('C') usage = format((float(data) * 0.001), ".3f") return usage @property def normal_water_usage_week(self): data = self._wp.week_water_usage('N') usage = format((float(data) * 0.001), ".3f") return usage @property def hot_water_usage_week(self): data = self._wp.week_water_usage('H') usage = format((float(data) * 0.001), ".3f") return usage @property def total_water_usage_week(self): cold = self.cold_water_usage_week normal = self.normal_water_usage_week hot = self.hot_water_usage_week total = format((float(cold) + float(normal) + float(hot)), ".3f") return total @property def cold_water_usage_month(self): data = self._wp.month_water_usage('C') usage = format((float(data) * 0.001), ".3f") return usage @property def normal_water_usage_month(self): data = self._wp.month_water_usage('N') usage = format((float(data) * 0.001), ".3f") return usage @property def hot_water_usage_month(self): data = self._wp.month_water_usage('H') usage = format((float(data) * 0.001), ".3f") return usage @property def total_water_usage_month(self): cold = self.cold_water_usage_month normal = self.normal_water_usage_month hot = self.hot_water_usage_month total = format((float(cold) + float(normal) + float(hot)), ".3f") return total @property def cold_water_usage_year(self): data = self._wp.year_water_usage('C') usage = format((float(data) * 0.001), ".3f") return usage @property def normal_water_usage_year(self): data = 
self._wp.year_water_usage('N')
        usage = format((float(data) * 0.001), ".3f")
        return usage

    @property
    def hot_water_usage_year(self):
        data = self._wp.year_water_usage('H')
        usage = format((float(data) * 0.001), ".3f")
        return usage

    @property
    def total_water_usage_year(self):
        cold = self.cold_water_usage_year
        normal = self.normal_water_usage_year
        hot = self.hot_water_usage_year
        total = format((float(cold) + float(normal) + float(hot)), ".3f")
        return total

    @property
    def cockclean_status(self):
        if self._state:
            mode = self._state.cockclean_state
            return COCKCLEANMODES[mode.name]

    def update(self):
        import wideq
        LOGGER.info('Updating %s.', self.name)

        for iteration in range(MAX_RETRIES):
            LOGGER.info('Polling...')

            try:
                state = self._wp.poll()
            except wideq.NotLoggedInError:
                LOGGER.info('Session expired. Refreshing.')
                self._client.refresh()
                self._wp.monitor_start()
                self._wp.monitor_start()
                self._wp.delete_permission()
                self._wp.delete_permission()
                continue

            if state:
                LOGGER.info('Status updated.')
                self._state = state
                self._client.refresh()
                self._wp.monitor_start()
                self._wp.monitor_start()
                self._wp.delete_permission()
                self._wp.delete_permission()
                return

            LOGGER.info('No status available yet.')
            time.sleep(2 ** iteration)

        # We tried several times but got no result. This might happen
        # when the monitoring request gets into a bad state, so we
        # restart the task.
        LOGGER.warning('Status update failed.')
        self._wp.monitor_start()
        self._wp.monitor_start()
        self._wp.delete_permission()
        self._wp.delete_permission()
import typing as typ from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa PrivateKey = typ.Union[ dsa.DSAPrivateKey, ec.EllipticCurvePrivateKey, rsa.RSAPrivateKey ] PrivateKeyWithSerialization = typ.Union[ dsa.DSAPrivateKeyWithSerialization, ec.EllipticCurvePrivateKeyWithSerialization, rsa.RSAPrivateKeyWithSerialization ] PublicKey = typ.Union[ dsa.DSAPublicKey, ec.EllipticCurvePublicKey, rsa.RSAPublicKey, ] PublicKeyWithSerialization = typ.Union[ dsa.DSAPublicKeyWithSerialization, ec.EllipticCurvePublicKeyWithSerialization, rsa.RSAPublicKeyWithSerialization, ]
import hashlib
import threading
import urllib.parse
import time

import requests
import random
import json

from voicecontrollermodel.voice_and_text import Voice2text, Text2voice
from PyQt5.QtCore import QThread, pyqtSignal, QObject


class Chat_Tread(QThread):
    # Chat thread: listens for speech, gets a chatbot reply, and speaks it back.
    chatting_signal = pyqtSignal(str, bool)

    def __init__(self, parent, voiceid=101001, sz=True):
        super(Chat_Tread, self).__init__()
        self.text2voice_player = Text2voice()
        self.voice2texter = Voice2text()
        self.parent = parent
        self.sz = sz
        self.voiceid = voiceid

    def run(self):
        voices = {"亲和女声": 0, "亲和男声": 1, "成熟男声": 2, "温暖女声": 4, "情感女声": 5, "情感男声": 6, "客服女声": 7,
                  "智侠|情感男声": 1000, "智瑜|情感女声": 1001, "智聆|通用女声": 1002, "智美|客服女声": 1003,
                  "WeJack|英文男声": 1050, "WeRose|英文女声": 1051,
                  "智侠|情感男声(精)": 101000, "智瑜|情感女声(精)": 101001, "智聆|通用女声(精)": 101002, "智美|客服女声(精)": 101003,
                  "智云|通用男声": 101004, "智莉|通用女声": 101005, "智言|助手女声": 101006, "智娜|客服女声": 101007,
                  "智琪|客服女声": 101008, "智芸|知性女声": 101009, "智华|通用男声": 101010,
                  "WeJack|英文男声(精)": 101050, "WeRose|英文女声(精)": 101051,
                  "贝蕾|客服女声": 102000, "贝果|客服女声": 102001, "贝紫|粤语女声": 102002, "贝雪|新闻女声": 102003}
        while True:
            question = self.voice2texter.record2text(Bd=False)
            if '嗯' in question:
                # Skip recognition results that are just a filler sound.
                print("嗯恩不识别")
                continue
            if question == "听不到任何声音":
                continue
            self.chatting_signal.emit(question, True)
            if "聊天" in question:
                if "退出" in question or "关闭" in question:
                    self.parent.chating = False
                    break
                elif "切换" in question:
                    if "声音" in question:
                        self.voiceid = list(voices.values())[random.randint(0, len(voices) - 1)]
                        self.text2voice_player.get_voice_and_paly_it("已切换播报人id为{}".format(self.voiceid))
                        continue
            if self.sz:
                ans = self.parent.get_sizhibot_response(question)
            else:
                ans = self.parent.get_chatter_response(question)
            # self.chatting_signal.emit(ans, False)
            self.text2voice_player.get_voice_and_paly_it(ans, self.voiceid)
            if not self.parent.chating:
                break
        self.text2voice_player.play("voicecontrollermodel/inandoutchating_file/out{}.wav".format(self.voiceid))
        self.parent.chating = False


class CommenThread(threading.Thread):
    def __init__(self, func, *args):
        super().__init__()
        self.func = func
        self.args = args

    def run(self):
        alen = len(self.args)
        if alen == 0:
            self.func()
        elif alen == 1:
            self.func(self.args[0])
        elif alen == 2:
            self.func(self.args[0], self.args[1])


class Chatter(QObject):
    chatter_response_singal = pyqtSignal(str, bool)

    def __init__(self):
        super(Chatter, self).__init__()
        self.url = r"https://api.ai.qq.com/fcgi-bin/nlp/nlp_textchat"
        self.chating = False

    def get_sizhibot_response(self, question):
        if len(question) == 0:
            question = "空文本"
        info = question.encode('utf-8')
        url = 'https://api.ownthink.com/bot'
        data = {u"appid": "db1b2a88a62c7650d74bee4d863f1853", "spoken": info, "userid": "test"}
        try:
            response = requests.post(url, data).content
        except:
            print('聊天出错')
            s = "聊天出错!请确保网络畅通!"
        else:
            res = json.loads(response)
            s = res['data']['info']['text']
            print("回答:", s)
        self.chatter_response_singal.emit(s, False)
        return s

    def __get_sign_code(self, params, app_key="TTkZvr74cJHQWQxR"):
        """
        Generate the request signature.
        1. Calculation steps
        The parameters used to compute the signature differ between interfaces, but the
        algorithm always follows these 4 steps:
        - Sort the <key, value> request parameter pairs by key in ascending dictionary
          order to get an ordered parameter list N.
        - Join the pairs in N as URL key-value pairs to get string T
          (e.g. key1=value1&key2=value2); the value part must be URL-encoded, and the
          encoding must use uppercase hex letters (e.g. %E8, not %e8).
        - Append the application key as app_key to the end of T to get string S
          (e.g. key1=value1&key2=value2&app_key=secret).
        - MD5 string S and convert all characters of the resulting hex digest to
          uppercase to obtain the request signature.
        2.
 Notes
        - Different interfaces require different parameter pairs, so the parameters used
          for signing differ between interfaces as well.
        - Parameter names are case-sensitive; parameters with empty values are not
          included in the signature.
        - The value part must be URL-encoded when joining the key-value pairs.
        - A signature is valid for 5 minutes, so compute it at request time.
        :param params: parameter dict
        :param app_key:
        :return:
        """
        if params is None or type(params) != dict or len(params) == 0:
            return
        try:
            params = sorted(params.items(), key=lambda x: x[0])
            _str = ''
            for item in params:
                key = item[0]
                value = item[1]
                if value == '':
                    continue
                _str += urllib.parse.urlencode({key: value}) + '&'
            _str += 'app_key=' + app_key
            _str = hashlib.md5(_str.encode('utf-8')).hexdigest()
            return _str.upper()
        except Exception as e:
            print(e)

    def __get_random_str(self, n=17):
        s = "qwertyuiop7894561230asdfghjklzxcvbnm"
        rs = ''
        for i in range(n):
            rs += s[random.randint(0, 15)]
        return rs

    def get_chatter_response(self, question):
        params = {"app_id": 2154786206,
                  "time_stamp": int(time.time()),
                  "nonce_str": self.__get_random_str(),
                  "session": 10000,
                  "question": question,
                  }
        params["sign"] = self.__get_sign_code(params)
        response = requests.get(self.url, params=params)
        js = response.json()
        if js['msg'] == 'chat answer not found':
            answer = '蜂鸟不能理解你的意思'
        else:
            print(js, "get_chatter_response")
            answer = js["data"]["answer"]
        print("回答:", answer)
        return answer

    def open_chat(self, voiceid=101001, func=None):
        self.chating = True
        self.chatthread = Chat_Tread(self, voiceid)
        if func is not None:
            self.chatthread.chatting_signal.connect(func)
        self.chatthread.start()

    def close_chat(self):
        # if self.chatthread.is_alive():
        self.chating = False


if __name__ == '__main__':
    # from text2voice import Text2voice
    # text2voicer = Text2voice()
    chatter = Chatter()
    # chatter.open_chat()
    chatter.get_sizhibot_response('年')
    # ans = chatter.get_chatter_response('你好')
    # text2voicer.get_voice_and_paly_it("好,那咱就说定了",True)
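# Worked example of the signing scheme documented in Chatter.__get_sign_code above.
# This is a sketch for illustration only; the app_key and parameter values here are
# placeholders, not real credentials. It reproduces the four documented steps:
# sort, URL-encode and join, append app_key, then uppercase the MD5 hex digest.
import hashlib
import urllib.parse


def example_sign(params, app_key="PLACEHOLDER_KEY"):
    pairs = sorted(((k, v) for k, v in params.items() if v != ''),
                   key=lambda kv: kv[0])                                   # step 1: sort by key
    joined = '&'.join(urllib.parse.urlencode({k: v}) for k, v in pairs)    # step 2: URL-encode + join
    joined += '&app_key=' + app_key                                        # step 3: append app_key
    return hashlib.md5(joined.encode('utf-8')).hexdigest().upper()         # step 4: uppercase MD5


# example_sign({"app_id": 123, "question": "hello"}) returns a 32-character uppercase hex string.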
from sys import argv import json in_filepath = argv[1] in_file = open(in_filepath, 'r') jsonParsed = json.load(in_file) x_width = jsonParsed["grid_size"][0] y_height = jsonParsed["grid_size"][1] def out_writeline(indent, line): str_out = "" for _ in range(indent): str_out += '\t' str_out += line print(str_out) def code_c_line(x1, y1, x2, y2): return "size += GO_drawLine(SAFE_PTR(start, size), x+" + str(x1) + "*height_factor, y+" + str(y1) + "*height_factor, x+" + str(x2) + "*height_factor, y+" + str(y2) + "*height_factor, 1, intensity);" def code_c_point(x, y): return "size += GO_drawPoint(SAFE_PTR(start, size), x+" + str(x) + "*height_factor, y+" + str(y) + "*height_factor, intensity*2);" out_writeline(0,"#include \"GraphicObject.h\"") out_writeline(0,"") out_writeline(0,"size_t GO_drawChar(void* start, char c, float x, float y, float height, int intensity) {") out_writeline(1,"size_t size = 0;") out_writeline(1,"float height_factor = height / 7.;") out_writeline(1,"switch(c) {") jsonCharDict = jsonParsed["char"] for char in jsonCharDict: if(char != '\'' and char != '\\'): out_writeline(2, "case '" + char + "':") else: out_writeline(2, "case '\\" + char + "':") char_points = jsonCharDict[char] for i in range(1, len(char_points)): if(char_points[i-1] >= 0 and char_points[i] >= 0): x1 = char_points[i-1] % x_width y1 = y_height - char_points[i-1] // x_width x2 = char_points[i] % x_width y2 = y_height - char_points[i] // x_width if(x1 == x2 and y1 == y2): out_writeline(3, code_c_point(x1, y1)) else: out_writeline(3, code_c_line(x1, y1, x2, y2)) out_writeline(3, "break;") out_writeline(2, "default:") out_writeline(3, code_c_line(0, 0, 4, 0)) out_writeline(3, code_c_line(4, 0, 4, 7)) out_writeline(3, code_c_line(4, 7, 0, 7)) out_writeline(3, code_c_line(0, 7, 0, 0)) out_writeline(3, code_c_line(0, 0, 4, 7)) out_writeline(3, code_c_line(0, 7, 4, 0)) out_writeline(3, "break;") out_writeline(1, "}") out_writeline(1, "return size;") out_writeline(0, "}") in_file.close()
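# The generator above assumes an input JSON of the following shape (illustrative values
# only; the real glyph data ships with the project): "grid_size" gives the glyph grid as
# [width, height], and "char" maps each character to a flat list of point indices
# (row-major, with negative values acting as pen-up separators between strokes).
#
# {
#     "grid_size": [5, 8],
#     "char": {
#         "I": [2, 37, -1, 0, 4, -1, 35, 39]
#     }
# }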
from csv import reader from io import TextIOWrapper from json import load from typing import List, Union from uuid import uuid4 from media import Movie, LimitedSeries, Podcast, TVShow, Season, Episode from options import options def json_to_media(file: Union[TextIOWrapper, str]) -> List[Union[Movie, LimitedSeries, Podcast, TVShow]]: """Converts the specified JSON file or filename into a Media object :param file: The file object or filename of the JSON file to load :raises FileNotFoundError: If the specified filename was not found on the system :raises TypeError: If the JSON file holds an unknown object type """ # Load the JSON file if isinstance(file, str): file = open(file, "r") media_list = load(file) for i in range(len(media_list)): media = media_list[i] if "type" not in media or media["type"] not in ["Movie", "TVShow", "Podcast", "LimitedSeries"]: raise TypeError(f"The media JSON object at index {i} does not have a valid type descriptor") if "id" not in media: media["id"] = str(uuid4()) if media["type"] == "Movie": media_list[i] = Movie(json=media) elif media["type"] == "TVShow": media_list[i] = TVShow(json=media) elif media["type"] == "Podcast": media_list[i] = Podcast(json=media) elif media["type"] == "LimitedSeries": media_list[i] = LimitedSeries(json=media) return media_list def csv_to_media(file: Union[TextIOWrapper, str]) -> List[Union[Movie, LimitedSeries, Podcast, TVShow]]: """Converts the specified CSV file or filename into a Media object :param file: The file object or filename of the CSV file to load :raises FileNotFoundError: If the specified filename was not found on the system :raises TypeError: If the CSV file holds an unknown object type :raises KeyError: If the Streaming Provider or Person given for a Media object is invalid """ # Load the CSV file and split into lines if isinstance(file, str): file = open(file, "r") r = reader(file) contents = [line for line in r if len(line) > 0] # Parse the lines to create Media objects from last = 0 media_list = [] content_list = [] for i in range(len(contents)): line = contents[i] if line[0] in ["Movie", "LimitedSeries", "Podcast", "TVShow"]: media_content = contents[last:i] if len(media_content) > 0: content_list.append(media_content) last = i content_list.append(contents[last:]) # Iterate through the content list to turn content into Media objects for media_content in content_list: if media_content[0][0] == "Movie": name, runtime, provider, person, started, finished = media_content[1] if provider not in options.get_providers(): raise KeyError(f"{provider} does not exist in your Streaming Providers") if person not in options.get_persons(): raise KeyError(f"{person} does not exist in your Person list") media_list.append(Movie( name, int(runtime), provider, person, started=started == "True", finished=finished == "True" )) elif media_content[0][0] == "LimitedSeries": name, provider, person, started, finished = media_content[1] if provider not in options.get_providers(): raise KeyError(f"{provider} does not exist in your Streaming Providers") if person not in options.get_persons(): raise KeyError(f"{person} does not exist in your Person list") episodes = [] for episode in media_content[2:]: try: s_num, e_num, e_name, runtime, watched = episode except ValueError as e: raise ValueError(f"{e} -> {episode}") episodes.append(Episode( int(s_num), int(e_num), e_name, int(runtime), watched=watched == "True" )) media_list.append(LimitedSeries( name, provider, person, episodes, started=started == "True", finished=finished == "True" )) elif 
media_content[0][0] in ["Podcast", "TVShow"]: name, provider, person, started, finished = media_content[1] if provider not in options.get_providers(): raise KeyError(f"{provider} does not exist in your Streaming Providers") if person not in options.get_persons(): raise KeyError(f"{person} does not exist in your Person list") seasons = {} for episode in media_content[2:]: try: s_num, e_num, e_name, runtime, watched = episode except ValueError as e: raise ValueError(f"{e} -> {episode}") if int(s_num) not in seasons: seasons[int(s_num)] = [] seasons[int(s_num)].append(Episode( int(s_num), int(e_num), e_name, int(runtime), watched=watched == "True" )) seasons = [Season(season, seasons[season]) for season in seasons] if media_content[0][0] == "Podcast": media_list.append(Podcast( name, provider, person, seasons, started=started == "True", finished=finished == "True" )) else: media_list.append(TVShow( name, provider, person, seasons, started=started == "True", finished=finished == "True" )) return media_list
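# Sketch of the CSV layout csv_to_media() above expects (hypothetical rows for
# illustration; providers and persons must exist in the options): a row naming the media
# type, then one row of attributes, then, for episodic media, one row per episode
# (season, episode, name, runtime, watched).
#
# Movie
# Inception,148,Netflix,Alice,True,True
# TVShow
# Some Show,Hulu,Bob,True,False
# 1,1,Pilot,42,True
# 1,2,Second Episode,41,False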
# Generated by Django 3.0.4 on 2020-04-13 15:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0051_auto_20200413_1233'), ] operations = [ migrations.AlterField( model_name='discente', name='matricula', field=models.CharField(max_length=15, unique=True), ), ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Mar 5 06:02:51 2021 Load GEMINI output corresponding to synthetic Poynting flux, current density (parallel), and electric field (perp.). Attempt to process the data into conductances @author: zettergm """ # imports import numpy as np import scipy.io as spio import matplotlib.pyplot as plt import scipy.interpolate, scipy.sparse, scipy.sparse.linalg #from plot_fns import plotSigmaP_debug from scen1_numerical import div2D,grad2D,FDmat2D,laplacepieces2D,mag2xy,linear_scen1 # setup plt.close("all") flagSigP_debug=False flagdebug=True # Load synthetic data maps and organize data, permute/transpose arrays as lat,lon for plotting # squeeze 1D arrays for plotting as well # We presume all of the data are organized as (z),x,y upon input filename="/Users/zettergm/Pegasusr4i/Dropbox (Personal)/shared/shared_simulations/arcs/scen1.mat" data=spio.loadmat(filename) E=np.asarray(data["E"],dtype="float64") # do not use directly in calculations due to r,theta,phi basis. Ex=np.squeeze(E[0,:,:,1]); Ey=np.squeeze(E[0,:,:,2]); Jpar=np.asarray(data["Jpar"],dtype="float64") # already indexed x,y Spar=np.asarray(data["Spar"],dtype="float64") # indexed x,y mlon=np.asarray(data["mlon"],dtype="float64") mlon=mlon.squeeze() mlat=np.asarray(data["mlat"],dtype="float64") mlat=mlat.squeeze() SigmaP_ref=np.asarray(data["SIGP"],dtype="float64") # indexed as x,y already SigmaH_ref=np.abs(np.asarray(data["SIGH"],dtype="float64")) # indexed as x,y already; convert to positive Hall conductance mlonp=np.asarray(data["mlonp"],dtype="float64") mlonp=mlonp.squeeze() mlatp=np.asarray(data["mlatp"],dtype="float64") mlatp=mlatp.squeeze() int_ohmic_ref=np.asarray(data["int_ohmic"]) # this computed via integration of 3D dissipation; indexed x,y ohmic_ref=np.asarray(data["ohmici"]) # map magnetic coordinates to local Cartesian to facilitate differencing and "fitting" [x,y]=mag2xy(mlon,mlat) lx=x.size; ly=y.size; # add noise to "measurements" noisefrac=0 Jpar=Jpar+noisefrac*max(Jpar.flatten())*np.random.randn(lx,ly) Spar=Spar+noisefrac*max(Spar.flatten())*np.random.randn(lx,ly) # Try to convert Spar to conductance, using steady-state integrated Poynting thm. magE2=Ex**2+Ey**2 magE=np.sqrt(magE2) SigmaP=-Spar/magE2 # compute E x bhat; Take bhat to be in the minus z-direction (assumes northern hemis.) Erotx=-Ey Eroty=Ex # flatten data vectors jvec=Jpar.flatten(order="F") svec=Spar.flatten(order="F") # Now try to estimate the Hall conductance using current continuity... We could # formulate this as an estimation problem which the two conductances were estimated # subject to the approximate constraints dictated by the conservation laws. 
# 1) try finite difference decomposition (non-parametric) # 2) basis expansion version if conditioning is poor (it seems to be) [A,b,UL,UR,LL,LR,LxH,LyH,divE]=linear_scen1(x,y,Ex,Ey,Erotx,Eroty,Jpar,Spar) # regularization of the problem ("regular" Tikhonov) regparm=1e-14 #regparm=1e-9 regkern=scipy.sparse.eye(2*lx*ly,2*lx*ly) bprime=A.transpose()@b Aprime=(A.transpose()@A + regparm*regkern) sigsreg=scipy.sparse.linalg.spsolve(Aprime,bprime,use_umfpack=True) sigPreg=np.reshape(sigsreg[0:lx*ly],[lx,ly],order="F") sigHreg=np.reshape(sigsreg[lx*ly:],[lx,ly],order="F") # Tikhonov curvature regularization regparm=1e-14 #regparm=1e-9 scale=np.ones((lx,ly)) [L2x,L2y]=laplacepieces2D(x,y,scale,scale) regkern=scipy.sparse.block_diag((L2x+L2y,L2x+L2y),format="csr") bprime=A.transpose()@b Aprime=(A.transpose()@A + regparm*regkern) sigsreg2=scipy.sparse.linalg.spsolve(Aprime,bprime,use_umfpack=True) sigPreg2=np.reshape(sigsreg2[0:lx*ly],[lx,ly],order="F") sigHreg2=np.reshape(sigsreg2[lx*ly:],[lx,ly],order="F") # test various subcomponents of inverse problem # first a sanity check on the Poynting thm. (lower left block of full matrix) ALL=LL; bLL=svec; xLL=scipy.sparse.linalg.spsolve(ALL,bLL) sigPLL=np.reshape(xLL,[lx,ly]) sigPLL=sigPLL.transpose() # try to use FD matrices to do a gradient to check that operators are being formed correctly #thetap=np.pi/2-np.deg2rad(mlatp) #meanthetap=np.average(thetap) #phip=np.deg2rad(mlonp) #meanphip=np.average(phip) #southdistp=Re*(thetap-meanthetap) #yp=np.flip(southdistp,axis=0) #xp=Re*np.sin(meanthetap)*(phip-meanphip) [xp,yp]=mag2xy(mlonp,mlatp) interpolant=scipy.interpolate.interp2d(xp,yp,SigmaP_ref.transpose()) # transpose to y,x SigmaP_refi=(interpolant(x,y)).transpose() # transpose back to x,y SigPvec=SigmaP_refi.flatten(order="F") [Lx,Ly]=FDmat2D(x,y,np.ones(Ex.shape),np.ones(Ey.shape)) gradSigPxvec=Lx@SigPvec gradSigPxmat=np.reshape(gradSigPxvec,[lx,ly],order="F") gradSigPyvec=Ly@SigPvec gradSigPymat=np.reshape(gradSigPyvec,[lx,ly],order="F") # next try a system with no Hall current divergence (this is already nearly the case for our test example) AUL=UL IUL=scipy.sparse.eye(lx*ly,lx*ly) regparm2=1e-16 AULprime=(AUL.transpose()@AUL+regparm2*IUL) bUL=jvec bULprime=AUL.transpose()@bUL xUL=scipy.sparse.linalg.spsolve(AULprime,bULprime) sigPUL=np.reshape(xUL,[lx,ly],order="F") # just current continuity with Pedersen terms requires regularization what about adding in the Poynting thm. to the inversion??? 
AULLL=scipy.sparse.vstack([UL,LL]) # overdetermined system bULLLprime=AULLL.transpose()@b AULLLprime=(AULLL.transpose()@AULLL) # don't regularize since overdetermined, simple Moore-Penrose approach xULLL=scipy.sparse.linalg.spsolve(AULLLprime,bULLLprime) sigPULLL=np.reshape(xULLL,[lx,ly],order="F") # now try to recover the current density from matrix-computed conductivity gradients as a check # note that we neglect hall currents for now since they are small jvectest=UL@SigPvec jvectestmat=np.reshape(jvectest,[lx,ly],order="F") # compute the projection of the Hall conductance gradient using matrix operators interpolant=scipy.interpolate.interp2d(xp,yp,SigmaH_ref.transpose()) SigmaH_refi=(interpolant(x,y)).transpose() SigHvec=SigmaH_refi.flatten(order="F") gradSigHprojvec=(LxH+LyH)@SigHvec gradSigHprojmat=np.reshape(gradSigHprojvec,[lx,ly],order="F") # recover current density from operator with the Hall terms SigHvec=SigmaH_refi.flatten(order="F") jvectest2=UL@SigPvec+UR@SigHvec jvectest2mat=np.reshape(jvectest2,[lx,ly],order="F") # Alternatively we can algebraicaly compute the gradient of Hall conductance given # Pedersen conductance. Then can execute a line integral to get the Hall term. # We do need to choose a location with very low Pedersen conductance for our reference # Hall conductance location. The issue is that this only gives the the projection along # the ExB direction so this may not be a suitable option!!! [gradSigPx,gradSigPy]=grad2D(SigmaP,x,y) gradSigHproj=Jpar+gradSigPx*Ex+gradSigPy*Ey+SigmaP*divE # Hall term from current continuity # Hall term computed from finite differences. [gradSigHx,gradSigHy]=grad2D(SigmaH_refi,x,y) gradSigHprojFD=Erotx*gradSigHx+Eroty*gradSigHy # check some of the calculations, gradients, divergences if flagdebug: plt.subplots(2,3,dpi=100) plt.subplot(2,3,1) plt.pcolormesh(x,y,-(divE*SigmaP_refi).transpose()) plt.colorbar() plt.title("$-\Sigma_P ( \\nabla \cdot \mathbf{E} )$") plt.clim(-1.5e-5,1.5e-5) plt.subplot(2,3,2) plt.pcolormesh(x,y,(-gradSigPx*Ex-gradSigPy*Ey).transpose()) plt.colorbar() plt.title("$-\\nabla \Sigma_P \cdot \mathbf{E}$") plt.clim(-1.5e-5,1.5e-5) plt.subplot(2,3,3) plt.pcolormesh(x,y,(Erotx*gradSigHx+Eroty*gradSigHy).transpose()) plt.colorbar() plt.title("$\\nabla \Sigma_H \cdot ( \mathbf{E} \\times \hat{b} )$") plt.clim(-1.5e-5,1.5e-5) plt.subplot(2,3,4) plt.pcolormesh(x,y,(Erotx*gradSigHx+Eroty*gradSigHy \ -gradSigPx*Ex-gradSigPy*Ey \ -divE*SigmaP_refi).transpose() ) plt.colorbar() plt.title("Current density (all terms)") plt.clim(-1.5e-5,1.5e-5) plt.subplot(2,3,5) plt.pcolormesh(x,y,Jpar.transpose()) plt.colorbar() plt.title("Current density (model)") plt.clim(-1.5e-5,1.5e-5) plt.show(block=False) if flagdebug: plt.subplots(1,3,dpi=100) plt.subplot(1,3,1) plt.pcolormesh(mlon,mlat,SigmaP.transpose()) plt.title("Estimated Pedersen") plt.colorbar() plt.clim(0,38) plt.subplot(1,3,2) plt.pcolormesh(mlonp,mlatp,SigmaP_ref.transpose()) plt.title("Reference Pedersen") plt.colorbar() plt.clim(0,38) plt.subplot(1,3,3) plt.pcolormesh(mlonp,mlatp,SigmaH_ref.transpose()) plt.title("Reference Hall") plt.colorbar() plt.clim(0,60) plt.show(block=False) if flagdebug: plt.subplots(1,3) plt.subplot(1,3,1) plt.pcolormesh(x,y,gradSigPx.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("Numerical $\\nabla \Sigma_P \cdot \mathbf{e}_x$") plt.subplot(1,3,2) plt.pcolormesh(x,y,gradSigPy.transpose()) plt.xlabel("x (km)") plt.colorbar() plt.title("Numerical $\\nabla \Sigma_P \cdot \mathbf{e}_y$") plt.subplot(1,3,3) 
plt.pcolormesh(x,y,divE.transpose()) plt.xlabel("x (km)") plt.colorbar() plt.title("Numerical $\\nabla \cdot \mathbf{E}$") plt.show(block=False) if flagdebug: plt.subplots(1,2,dpi=100) plt.subplot(1,2,1) plt.pcolormesh(x,y,gradSigPxmat.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("Matrix $\\nabla \Sigma_P \cdot \mathbf{e}_x$") plt.subplot(1,2,2) plt.pcolormesh(x,y,gradSigPymat.transpose()) plt.xlabel("x (km)") plt.colorbar() plt.title("Matrix $\\nabla \Sigma_P \cdot \mathbf{e}_y$") plt.show(block=False) if flagdebug: plt.subplots(1,3) plt.subplot(1,3,1) plt.pcolormesh(x,y,jvectestmat.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("$J_\parallel$ (matrix sans Hall)") plt.subplot(1,3,2) plt.pcolormesh(x,y,Jpar.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("$J_\parallel$ from model") plt.subplot(1,3,3) plt.pcolormesh(x,y,jvectest2mat.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("$J_\parallel$ (matrix with Hall)") plt.show(block=False) if flagdebug: plt.subplots(1,3) plt.subplot(1,3,1) plt.pcolormesh(x,y,gradSigHproj.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("Projection of ${\\nabla \Sigma_H}$ (CC)") plt.clim(-3e-6,3e-6) plt.subplot(1,3,2) plt.pcolormesh(x,y,gradSigHprojmat.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("Projection of ${\\nabla \Sigma_H}$ (matrix)") plt.clim(-3e-6,3e-6) plt.subplot(1,3,3) plt.pcolormesh(x,y,gradSigHprojFD.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.colorbar() plt.title("Projection of ${\\nabla \Sigma_H}$ (FD)") plt.show(block=False) plt.clim(-3e-6,3e-6) if flagdebug: plt.subplots(1,2) plt.subplot(1,2,1) plt.pcolormesh(x,y,sigPreg.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("Full Operator, norm regularized: $\Sigma_P$") plt.colorbar() plt.subplot(1,2,2) plt.pcolormesh(x,y,sigHreg.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("Full Operator, norm egularized: $\Sigma_H$") plt.colorbar() plt.show(block=False) if flagdebug: plt.subplots(1,2) plt.subplot(1,2,1) plt.pcolormesh(x,y,sigPreg2.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("Full Operator, curvature regularized: $\Sigma_P$") plt.colorbar() plt.clim(0,38) plt.subplot(1,2,2) plt.pcolormesh(x,y,sigHreg2.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("Full Operator, curvature regularized $\Sigma_H$") plt.colorbar() plt.clim(0,60) plt.show(block=False) if flagdebug: plt.subplots(1,2) plt.subplot(1,2,1) plt.pcolormesh(x,y,sigPLL.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("$\Sigma_P$ via Poynting") plt.colorbar() plt.subplot(1,2,2) plt.pcolormesh(x,y,sigPUL.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("$\Sigma_P$ via current continuity") plt.colorbar() plt.show(block=False) if flagdebug: plt.figure(dpi=100) plt.pcolormesh(x,y,sigPULLL.transpose()) plt.xlabel("x (km)") plt.ylabel("y (km)") plt.title("$\Sigma_P$ via current continuity and Poynting combined") plt.colorbar() plt.show(block=False) # do some extra debug plots? 
if flagSigP_debug: # Recompute Ohmic dissipation (field-integrated) as a test [MLON,MLAT]=np.meshgrid(mlon,mlat) SigmaP_refi=scipy.interpolate.interpn((mlonp,mlatp),np.transpose(SigmaP_ref),(MLON,MLAT)) # needs to be permuted as lon,lat dissipation=SigmaP_refi*magE2 # plotSigmaP_debug(mlon,mlat,mlonp,mlatp,Spar,Eperp,dissipation,int_ohmic_ref, \ # SigmaP_ref,SigmaP_refi,magE2)
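# Tiny numerical sketch of the Poynting-theorem estimate used above (illustrative values
# only): with a downward Poynting flux Spar of -5 mW/m^2 and |E| = 25 mV/m, the
# steady-state relation SigmaP = -Spar / |E|^2 gives
#   SigmaP = 5e-3 / (0.025 ** 2) = 8.0 S,
# which is the same per-pixel operation applied to the Spar and magE2 arrays.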
# coding: utf-8 """ Server API Reference for Server API (REST/Json) OpenAPI spec version: 2.0.9 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class UpdateCartRuleRequest(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, id_customer=None, description=None, priority=None, partial_use=None, code=None, active=None, name=None, date_from=None, date_to=None, quantity=None, quantity_per_user=None, minimum_amount=None, minimum_amount_tax=None, minimum_amount_currency=None, every_recurring_payments=None, reduction_percent=None, reduction_amount=None, reduction_currency=None, reduction_tax=None, restriction_groups=None): """ UpdateCartRuleRequest - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'id_customer': 'int', 'description': 'str', 'priority': 'int', 'partial_use': 'bool', 'code': 'str', 'active': 'bool', 'name': 'list[I18nField]', 'date_from': 'str', 'date_to': 'str', 'quantity': 'int', 'quantity_per_user': 'int', 'minimum_amount': 'int', 'minimum_amount_tax': 'int', 'minimum_amount_currency': 'int', 'every_recurring_payments': 'bool', 'reduction_percent': 'float', 'reduction_amount': 'float', 'reduction_currency': 'int', 'reduction_tax': 'int', 'restriction_groups': 'list[CartRuleRestrictionGroup]' } self.attribute_map = { 'id_customer': 'id_customer', 'description': 'description', 'priority': 'priority', 'partial_use': 'partial_use', 'code': 'code', 'active': 'active', 'name': 'name', 'date_from': 'date_from', 'date_to': 'date_to', 'quantity': 'quantity', 'quantity_per_user': 'quantity_per_user', 'minimum_amount': 'minimum_amount', 'minimum_amount_tax': 'minimum_amount_tax', 'minimum_amount_currency': 'minimum_amount_currency', 'every_recurring_payments': 'every_recurring_payments', 'reduction_percent': 'reduction_percent', 'reduction_amount': 'reduction_amount', 'reduction_currency': 'reduction_currency', 'reduction_tax': 'reduction_tax', 'restriction_groups': 'restriction_groups' } self._id_customer = id_customer self._description = description self._priority = priority self._partial_use = partial_use self._code = code self._active = active self._name = name self._date_from = date_from self._date_to = date_to self._quantity = quantity self._quantity_per_user = quantity_per_user self._minimum_amount = minimum_amount self._minimum_amount_tax = minimum_amount_tax self._minimum_amount_currency = minimum_amount_currency self._every_recurring_payments = every_recurring_payments self._reduction_percent = reduction_percent self._reduction_amount = reduction_amount self._reduction_currency = reduction_currency self._reduction_tax = reduction_tax self._restriction_groups = restriction_groups @property def id_customer(self): """ Gets the id_customer of this UpdateCartRuleRequest. Limit to a single user :return: The id_customer of this UpdateCartRuleRequest. :rtype: int """ return self._id_customer @id_customer.setter def id_customer(self, id_customer): """ Sets the id_customer of this UpdateCartRuleRequest. Limit to a single user :param id_customer: The id_customer of this UpdateCartRuleRequest. :type: int """ self._id_customer = id_customer @property def description(self): """ Gets the description of this UpdateCartRuleRequest. 
For your eyes only. This will never be displayed to the customer :return: The description of this UpdateCartRuleRequest. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this UpdateCartRuleRequest. For your eyes only. This will never be displayed to the customer :param description: The description of this UpdateCartRuleRequest. :type: str """ self._description = description @property def priority(self): """ Gets the priority of this UpdateCartRuleRequest. Rules are applied by priority. A rule with a priority of \"1\" will be processed before one with a priority of \"2\" :return: The priority of this UpdateCartRuleRequest. :rtype: int """ return self._priority @priority.setter def priority(self, priority): """ Sets the priority of this UpdateCartRuleRequest. Rules are applied by priority. A rule with a priority of \"1\" will be processed before one with a priority of \"2\" :param priority: The priority of this UpdateCartRuleRequest. :type: int """ self._priority = priority @property def partial_use(self): """ Gets the partial_use of this UpdateCartRuleRequest. Allow to partial use this cart rule. If cart rule amount is greater than total customer order, a new cart rule will be created with the remainder amount. :return: The partial_use of this UpdateCartRuleRequest. :rtype: bool """ return self._partial_use @partial_use.setter def partial_use(self, partial_use): """ Sets the partial_use of this UpdateCartRuleRequest. Allow to partial use this cart rule. If cart rule amount is greater than total customer order, a new cart rule will be created with the remainder amount. :param partial_use: The partial_use of this UpdateCartRuleRequest. :type: bool """ self._partial_use = partial_use @property def code(self): """ Gets the code of this UpdateCartRuleRequest. Code used by customer to add it on his cart summary. Caution: rule will automatically be applied to everyone if you leave it blank :return: The code of this UpdateCartRuleRequest. :rtype: str """ return self._code @code.setter def code(self, code): """ Sets the code of this UpdateCartRuleRequest. Code used by customer to add it on his cart summary. Caution: rule will automatically be applied to everyone if you leave it blank :param code: The code of this UpdateCartRuleRequest. :type: str """ self._code = code @property def active(self): """ Gets the active of this UpdateCartRuleRequest. Status of the cart rule :return: The active of this UpdateCartRuleRequest. :rtype: bool """ return self._active @active.setter def active(self, active): """ Sets the active of this UpdateCartRuleRequest. Status of the cart rule :param active: The active of this UpdateCartRuleRequest. :type: bool """ self._active = active @property def name(self): """ Gets the name of this UpdateCartRuleRequest. This will be displayed in the cart summary, as well as on the invoice :return: The name of this UpdateCartRuleRequest. :rtype: list[I18nField] """ return self._name @name.setter def name(self, name): """ Sets the name of this UpdateCartRuleRequest. This will be displayed in the cart summary, as well as on the invoice :param name: The name of this UpdateCartRuleRequest. :type: list[I18nField] """ if name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def date_from(self): """ Gets the date_from of this UpdateCartRuleRequest. Rule starts when this date is reached :return: The date_from of this UpdateCartRuleRequest. 
:rtype: str """ return self._date_from @date_from.setter def date_from(self, date_from): """ Sets the date_from of this UpdateCartRuleRequest. Rule starts when this date is reached :param date_from: The date_from of this UpdateCartRuleRequest. :type: str """ self._date_from = date_from @property def date_to(self): """ Gets the date_to of this UpdateCartRuleRequest. Rule ends when this date is reached :return: The date_to of this UpdateCartRuleRequest. :rtype: str """ return self._date_to @date_to.setter def date_to(self, date_to): """ Sets the date_to of this UpdateCartRuleRequest. Rule ends when this date is reached :param date_to: The date_to of this UpdateCartRuleRequest. :type: str """ self._date_to = date_to @property def quantity(self): """ Gets the quantity of this UpdateCartRuleRequest. The cart rule will be applied to the first \"X\" orders only :return: The quantity of this UpdateCartRuleRequest. :rtype: int """ return self._quantity @quantity.setter def quantity(self, quantity): """ Sets the quantity of this UpdateCartRuleRequest. The cart rule will be applied to the first \"X\" orders only :param quantity: The quantity of this UpdateCartRuleRequest. :type: int """ self._quantity = quantity @property def quantity_per_user(self): """ Gets the quantity_per_user of this UpdateCartRuleRequest. A customer will only be able to use the cart rule \"X\" time(s) :return: The quantity_per_user of this UpdateCartRuleRequest. :rtype: int """ return self._quantity_per_user @quantity_per_user.setter def quantity_per_user(self, quantity_per_user): """ Sets the quantity_per_user of this UpdateCartRuleRequest. A customer will only be able to use the cart rule \"X\" time(s) :param quantity_per_user: The quantity_per_user of this UpdateCartRuleRequest. :type: int """ self._quantity_per_user = quantity_per_user @property def minimum_amount(self): """ Gets the minimum_amount of this UpdateCartRuleRequest. You can choose a minimum amount for the cart, either with taxes or not :return: The minimum_amount of this UpdateCartRuleRequest. :rtype: int """ return self._minimum_amount @minimum_amount.setter def minimum_amount(self, minimum_amount): """ Sets the minimum_amount of this UpdateCartRuleRequest. You can choose a minimum amount for the cart, either with taxes or not :param minimum_amount: The minimum_amount of this UpdateCartRuleRequest. :type: int """ self._minimum_amount = minimum_amount @property def minimum_amount_tax(self): """ Gets the minimum_amount_tax of this UpdateCartRuleRequest. :return: The minimum_amount_tax of this UpdateCartRuleRequest. :rtype: int """ return self._minimum_amount_tax @minimum_amount_tax.setter def minimum_amount_tax(self, minimum_amount_tax): """ Sets the minimum_amount_tax of this UpdateCartRuleRequest. :param minimum_amount_tax: The minimum_amount_tax of this UpdateCartRuleRequest. :type: int """ self._minimum_amount_tax = minimum_amount_tax @property def minimum_amount_currency(self): """ Gets the minimum_amount_currency of this UpdateCartRuleRequest. Currency ID :return: The minimum_amount_currency of this UpdateCartRuleRequest. :rtype: int """ return self._minimum_amount_currency @minimum_amount_currency.setter def minimum_amount_currency(self, minimum_amount_currency): """ Sets the minimum_amount_currency of this UpdateCartRuleRequest. Currency ID :param minimum_amount_currency: The minimum_amount_currency of this UpdateCartRuleRequest. 
:type: int """ self._minimum_amount_currency = minimum_amount_currency @property def every_recurring_payments(self): """ Gets the every_recurring_payments of this UpdateCartRuleRequest. If customer cart contains a subscription, select if cart rule will apply on recurring payments :return: The every_recurring_payments of this UpdateCartRuleRequest. :rtype: bool """ return self._every_recurring_payments @every_recurring_payments.setter def every_recurring_payments(self, every_recurring_payments): """ Sets the every_recurring_payments of this UpdateCartRuleRequest. If customer cart contains a subscription, select if cart rule will apply on recurring payments :param every_recurring_payments: The every_recurring_payments of this UpdateCartRuleRequest. :type: bool """ self._every_recurring_payments = every_recurring_payments @property def reduction_percent(self): """ Gets the reduction_percent of this UpdateCartRuleRequest. Discount applied to cart when rule is added (in %). :return: The reduction_percent of this UpdateCartRuleRequest. :rtype: float """ return self._reduction_percent @reduction_percent.setter def reduction_percent(self, reduction_percent): """ Sets the reduction_percent of this UpdateCartRuleRequest. Discount applied to cart when rule is added (in %). :param reduction_percent: The reduction_percent of this UpdateCartRuleRequest. :type: float """ self._reduction_percent = reduction_percent @property def reduction_amount(self): """ Gets the reduction_amount of this UpdateCartRuleRequest. Discount applied to cart when rule is added (in currency) :return: The reduction_amount of this UpdateCartRuleRequest. :rtype: float """ return self._reduction_amount @reduction_amount.setter def reduction_amount(self, reduction_amount): """ Sets the reduction_amount of this UpdateCartRuleRequest. Discount applied to cart when rule is added (in currency) :param reduction_amount: The reduction_amount of this UpdateCartRuleRequest. :type: float """ self._reduction_amount = reduction_amount @property def reduction_currency(self): """ Gets the reduction_currency of this UpdateCartRuleRequest. Currency ID for reduction amount :return: The reduction_currency of this UpdateCartRuleRequest. :rtype: int """ return self._reduction_currency @reduction_currency.setter def reduction_currency(self, reduction_currency): """ Sets the reduction_currency of this UpdateCartRuleRequest. Currency ID for reduction amount :param reduction_currency: The reduction_currency of this UpdateCartRuleRequest. :type: int """ self._reduction_currency = reduction_currency @property def reduction_tax(self): """ Gets the reduction_tax of this UpdateCartRuleRequest. Tax application for currency discount :return: The reduction_tax of this UpdateCartRuleRequest. :rtype: int """ return self._reduction_tax @reduction_tax.setter def reduction_tax(self, reduction_tax): """ Sets the reduction_tax of this UpdateCartRuleRequest. Tax application for currency discount :param reduction_tax: The reduction_tax of this UpdateCartRuleRequest. :type: int """ self._reduction_tax = reduction_tax @property def restriction_groups(self): """ Gets the restriction_groups of this UpdateCartRuleRequest. :return: The restriction_groups of this UpdateCartRuleRequest. :rtype: list[CartRuleRestrictionGroup] """ return self._restriction_groups @restriction_groups.setter def restriction_groups(self, restriction_groups): """ Sets the restriction_groups of this UpdateCartRuleRequest. :param restriction_groups: The restriction_groups of this UpdateCartRuleRequest. 
:type: list[CartRuleRestrictionGroup] """ self._restriction_groups = restriction_groups def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
#!/usr/bin/env python # # Script to process example doxygen format # # Takes as input a "normal" doxygen file # that may have example tags in it. # # It outputs the new version of the file # import sys; import os; import re; import getopt; # Globals inFileName = None; outFileName = None; incPaths = []; # Print the usage information def printUsage(): print "Usage: AutoBuild --help --search=<search path>"; # Handles the command line parameters def parseCmdLine(): global inFileName, outFileName; try: opts, args = getopt.getopt(sys.argv[1:], "", ["help", "path="]) except getopt.GetoptError: # print help information and exit: printUsage() sys.exit(2) for o, a in opts: if o in ("-h", "--help"): printUsage() sys.exit() if o in ("--path",): incPaths.append(a); if len(args) == 0: print "Invalid number of args"; printUsage(); sys.exit(); else: inFileName = args[0]; if len(args) > 1: outFileName = args[1]; # Method called to replace a match on an exincludexample tag # - Open the other file and get it's content # - Remove all skips # - Surround code with code tags # - Remove comment begin and enders /* and */ # - Return it def replaceExInclude(match): # Load the file # - Assume it is relative to the included file inc_file_name = match.groups()[0]; inc_file_name = os.path.join(os.path.dirname(inFileName), inc_file_name); inc_file = file(inc_file_name); file_contents = inc_file.read(); # Replace skips skip_re = re.compile("@exskip.*?@exendskip", re.DOTALL); noskip_contents = skip_re.sub("",file_contents); num_subs = 0; # - Surround the code # NOTE: This will also match the case */ <space> /* which we dont' want code_bounds_re = re.compile("\*/(.*?)/\*", re.DOTALL); bad_codes_re = re.compile("@code\W*?@endcode", re.DOTALL); # re to remove codes that don't have content hanging_code_re = re.compile("\*/.*?(?! \*/)", re.DOTALL); # re to handle the case of hanging code at the end of the document (ie. */ comment then code and nothing else) with_code_contents = code_bounds_re.sub("\n@code\n\\1\n@endcode\n", noskip_contents); (with_code_contents, num_subs) = hanging_code_re.subn("\n@code\n", with_code_contents); if num_subs > 0: # If we added a hanging code, we need to close it too with_code_contents += "\n@endcode\n"; fixed_code_content = bad_codes_re.sub("", with_code_contents); # -- Remove comment bookends comment_ends_re = re.compile("(/\*)|(\*/)"); final_content = comment_ends_re.sub("", fixed_code_content); line_start_clean_re = re.compile("^\s*?\*", re.MULTILINE); final_content = line_start_clean_re.sub("", final_content); return final_content; # Actual method to process the file # - For any exincludeexample tags found # - Replace them with the resulting content # - Save out the new file def processFile(): exincludeexample_re = re.compile("@exincludeexample\s*?(\S+)"); # Match the @exincludeexample tag # - Load the files in_file = file(inFileName, 'r'); out_file = None; if(outFileName): out_file = file(outFileName, 'w'); # Truncates the old file # --- Process the file contents file_contents = in_file.read(); new_file_contents = exincludeexample_re.sub(replaceExInclude, file_contents); # --- write it out if out_file: out_file.write(new_file_contents); out_file.close(); else: print new_file_contents; in_file.close(); # # "Main" function # if __name__ == "__main__": parseCmdLine(); # Process the command line parameters processFile(); sys.exit();
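# Illustrative input for the script above (assumed usage, not taken from a real project).
# A doxygen page can pull in an example file like this:
#
#   /** @exincludeexample examples/simple_example.cpp */
#
# replaceExInclude() then loads examples/simple_example.cpp relative to the input file,
# drops any @exskip ... @exendskip regions, wraps the code that sits between comment
# blocks in @code/@endcode, and strips the /* and */ markers so the example's own
# comments become normal documentation text.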
engine.run_script('init-touchcursor') keyboard.send_keys('<ctrl>+z')
# database.py
"""Access to the database"""

from datetime import datetime
import sqlite3
from typing import NamedTuple, Optional, Union

import discord

from resources import exceptions, logs, settings


ARCHMAGE_DB = sqlite3.connect(settings.DB_FILE, isolation_level=None)

INTERNAL_ERROR_SQLITE3 = 'Error executing SQL.\nError: {error}\nTable: {table}\nFunction: {function}\nSQL: {sql}'
INTERNAL_ERROR_LOOKUP = 'Error assigning values.\nError: {error}\nTable: {table}\nFunction: {function}\nRecords: {record}'
INTERNAL_ERROR_NO_ARGUMENTS = 'You need to specify at least one keyword argument.\nTable: {table}\nFunction: {function}'


class User(NamedTuple):
    user_id: int
    target_enchant: int


async def log_error(error: Union[Exception, str], ctx: Optional[discord.ApplicationContext] = None):
    """Logs an error to the database and the logfile

    Arguments
    ---------
    error: Exception or a simple string.
    ctx: If context is available, the function will log the user input, the message timestamp
    and the user settings. If not, current time is used, settings and input are logged as "N/A".

    Raises
    ------
    sqlite3.Error when something goes wrong in the database. Also logs this error to the log file.
    """
    table = 'errors'
    function_name = 'log_error'
    sql = 'INSERT INTO errors VALUES (?, ?, ?, ?, ?)'
    if ctx is not None:
        timestamp = ctx.author.created_at
        command_name = f'{ctx.command.full_parent_name} {ctx.command.name}'.strip()
        command_data = str(ctx.interaction.data)
    else:
        timestamp = datetime.utcnow()
        command_name = 'N/A'
        command_data = 'N/A'
    try:
        user_settings = await get_user(ctx.author.id)
    except:
        user_settings = 'N/A'
    try:
        cur = ARCHMAGE_DB.cursor()
        cur.execute(sql, (timestamp, command_name, command_data, str(error), user_settings))
    except sqlite3.Error as error:
        logs.logger.error(
            INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql),
            ctx
        )
        raise


# --- Get Data ---
async def get_user(user_id: int) -> User:
    """Gets user settings.

    Returns
    -------
    User object

    Raises
    ------
    sqlite3.Error if something happened within the database.
    exceptions.NoDataFoundError if no user was found.
    LookupError if something goes wrong reading the dict.
    Also logs all errors to the database.
    """
    table = 'settings_user'
    function_name = 'get_user'
    sql = 'SELECT * FROM settings_user where user_id=?'
    try:
        cur = ARCHMAGE_DB.cursor()
        cur.row_factory = sqlite3.Row
        cur.execute(sql, (user_id,))
        record = cur.fetchone()
    except sqlite3.Error as error:
        await log_error(
            INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql)
        )
        raise
    if not record:
        raise exceptions.NoDataFoundError('User not in database')
    try:
        user_settings = User(
            user_id = record['user_id'],
            target_enchant = record['target_enchant'],
        )
    except Exception as error:
        await log_error(
            INTERNAL_ERROR_LOOKUP.format(error=error, table=table, function=function_name, record=record)
        )
        raise LookupError
    return user_settings


async def get_user_count(ctx: discord.ApplicationContext) -> int:
    """Gets the amount of users in the database.

    Returns
    -------
    Amount of users: int

    Raises
    ------
    sqlite3.Error if something happened within the database.
    Also logs this error to the log file.
""" table = 'settings_user' function_name = 'get_user_count' sql = 'SELECT COUNT(user_id) FROM settings_user' try: cur = ARCHMAGE_DB.cursor() cur.execute(sql) record = cur.fetchone() except sqlite3.Error as error: await log_error( INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql), ctx ) raise (user_count,) = record return user_count # --- Write Data --- async def update_user(user_id: int, **kwargs) -> None: """Updates guild settings. Arguments --------- user_id: int kwargs (column=value): target_enchant: int Raises ------ sqlite3.Error if something happened within the database. NoArgumentsError if not kwargs are passed (need to pass at least one) Also logs all error to the database. """ table = 'settings_user' function_name = 'update_user' if not kwargs: await log_error( INTERNAL_ERROR_NO_ARGUMENTS.format(table=table, function=function_name) ) raise exceptions.NoArgumentsError('You need to specify at least one keyword argument.') cur = ARCHMAGE_DB.cursor() try: await get_user(user_id) except exceptions.NoDataFoundError: sql = 'INSERT INTO settings_user (user_id, target_enchant) VALUES (?, ?)' try: cur.execute(sql, (user_id, settings.ENCHANTS.index('None'))) except sqlite3.Error as error: await log_error( INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql) ) raise try: sql = 'UPDATE settings_user SET' for kwarg in kwargs: sql = f'{sql} {kwarg} = :{kwarg},' sql = sql.strip(",") kwargs['user_id'] = user_id sql = f'{sql} WHERE user_id = :user_id' cur.execute(sql, kwargs) except sqlite3.Error as error: await log_error( INTERNAL_ERROR_SQLITE3.format(error=error, table=table, function=function_name, sql=sql) ) raise
__all__ = ["DWDataReader", "detect_peaks"]
# coding: utf-8
from collections import OrderedDict
from collections.abc import Iterable  # Iterable lives in collections.abc; the plain collections alias was removed in Python 3.10

from tornado.web import URLSpec


class Route(object):
    def __init__(self, pattern, handler, args=None, name=None):
        self.pattern = pattern
        self.handler = handler
        self.name = name
        self.args = args

    def get_url_pattern(self, base_url=""):
        pattern = self.pattern
        base_url = base_url.strip("^$/")  # filter base_url metachars and slashes
        if base_url:
            pattern = r"%s/%s" % (r"^/" + base_url, self.pattern.strip("^$").lstrip("/"))
        if not pattern.endswith("$"):
            pattern += "$"
        return pattern

    def get_url_spec(self, base_url=""):
        pattern = self.get_url_pattern(base_url)
        return URLSpec(pattern, self.handler, self.args, name=self.name)


class Routes(object):
    def __init__(self, settings):
        self.settings = settings
        self.map = OrderedDict()

    def register(self, routes, base_url=""):
        if not isinstance(routes, Iterable):
            routes = (routes,)
        for route in routes:
            self.map[route.get_url_pattern(base_url)] = (route, base_url)

    def get_urlspecs(self):
        return [route.get_url_spec(base_url) for route, base_url in self.map.values()]
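# --- Hedged usage sketch (assumes Tornado is installed; the handler and URLs are made up) ---
# Registers one route under a base URL and builds a tornado Application from the URLSpecs.
import tornado.ioloop
import tornado.web


class PingHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("pong")


routes = Routes(settings={})
routes.register([Route(r"/ping", PingHandler, name="ping")], base_url="api/v1")
app = tornado.web.Application(routes.get_urlspecs())
# The registered pattern becomes r"^/api/v1/ping$".

if __name__ == "__main__":
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()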
from bandcamper.__version__ import __author__ from bandcamper.__version__ import __author_email__ from bandcamper.__version__ import __copyright__ from bandcamper.__version__ import __description__ from bandcamper.__version__ import __license__ from bandcamper.__version__ import __title__ from bandcamper.__version__ import __url__ from bandcamper.__version__ import __version__ from bandcamper.bandcamper import Bandcamper
# -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2018-05-06 20:46 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Grundgeruest', '0014_auto_20180504_1518'), ] operations = [ migrations.AddField( model_name='unterstuetzung', name='zahlung_id', field=models.CharField(blank=True, max_length=200, null=True, unique=True), ), ]
# coding: UTF-8

print('''
[Program 1] Problem (classic puzzle): A pair of rabbits gives birth to a new pair every
month from its third month onwards, and every newborn pair likewise starts producing one
pair per month once it reaches its third month. Assuming no rabbit ever dies, how many
pairs of rabbits are there in each month?
1. Analysis: the monthly totals follow the sequence 1, 1, 2, 3, 5, 8, 13, 21, ...
''')

print("Print the sequence: the result sequence for 100 weeks")

# def number_list(sequence):
#     for month in range(3, 1555+1):
#         program_sign_i = month - 1
#         one = sequence[program_sign_i - 2]
#         two = sequence[program_sign_i - 1]
#         result = one + two;
#         print(result)
#         sequence.append(result)
#         if len(str(result)) > 100:
#             break;
#     print(sequence)
#     print("len: ", len(sequence))
# number_list(sequence = [1, 1])

# def number_list(first_num, second_num):
#     result = first_num + second_num
#     print(result)
#     if len(str(result)) < 209:
#         number_list(second_num, result)
# number_list(1, 1)

# def number_list(first_num = 1, second_num = 1):
#     while len(str(second_num)) < 209:
#         result = first_num + second_num
#         first_num = second_num
#         second_num = result
#         print(result)
# number_list()

def number_list(first_num = 1, second_num = 1):
    array = [first_num, second_num]
    while len(str(second_num)) < 209:
        result = first_num + second_num
        first_num = second_num
        second_num = result
        array.append(result)
    print(array)

number_list()
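# --- A minimal sketch of the same recurrence (not part of the exercise above) ---
# Yields the monthly rabbit-pair totals; the slice size of 12 is arbitrary.
from itertools import islice

def rabbit_pairs():
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b

print(list(islice(rabbit_pairs(), 12)))  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]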
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import heatclient.exc as heat_exceptions import time from tempest.scenario import manager from tempest.test import attr from tempest.test import call_until_true from tempest.test import services from tempest.test import skip_because class AutoScalingTest(manager.OrchestrationScenarioTest): def setUp(self): super(AutoScalingTest, self).setUp() if not self.config.orchestration.image_ref: raise self.skipException("No image available to test") self.client = self.orchestration_client def assign_keypair(self): self.stack_name = self._stack_rand_name() if self.config.orchestration.keypair_name: self.keypair_name = self.config.orchestration.keypair_name else: self.keypair = self.create_keypair() self.keypair_name = self.keypair.id def launch_stack(self): net = self._get_default_network() self.parameters = { 'KeyName': self.keypair_name, 'InstanceType': self.config.orchestration.instance_type, 'ImageId': self.config.orchestration.image_ref, 'StackStart': str(time.time()), 'Subnet': net['subnets'][0] } # create the stack self.template = self._load_template(__file__, 'test_autoscaling.yaml') self.client.stacks.create( stack_name=self.stack_name, template=self.template, parameters=self.parameters) self.stack = self.client.stacks.get(self.stack_name) self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id) # if a keypair was set, do not delete the stack on exit to allow # for manual post-mortums if not self.config.orchestration.keypair_name: self.set_resource('stack', self.stack) #@skip_because(bug="1257575") @attr(type='slow') @services('orchestration', 'compute') def test_scale_up_then_down(self): self.assign_keypair() self.launch_stack() sid = self.stack_identifier timeout = self.config.orchestration.build_timeout interval = 10 self.assertEqual('CREATE', self.stack.action) # wait for create to complete. self.status_timeout(self.client.stacks, sid, 'COMPLETE', error_status='FAILED') self.stack.get() self.assertEqual('CREATE_COMPLETE', self.stack.stack_status) # the resource SmokeServerGroup is implemented as a nested # stack, so servers can be counted by counting the resources # inside that nested stack resource = self.client.resources.get(sid, 'SmokeServerGroup') nested_stack_id = resource.physical_resource_id self.client def server_count(): # the number of servers is the number of resources # in the nested stack self.server_count = len( self.client.resources.list(nested_stack_id)) return self.server_count def assertScale(from_servers, to_servers): call_until_true(lambda: server_count() == to_servers, timeout, interval) self.assertEqual(to_servers, self.server_count, 'Failed scaling from %d to %d servers. 
' 'Current server count: %s' % ( from_servers, to_servers, self.server_count)) # he marched them up to the top of the hill assertScale(1, 2) assertScale(2, 3) # and he marched them down again assertScale(3, 2) assertScale(2, 1) # delete stack on completion self.stack.delete() self.status_timeout(self.client.stacks, sid, 'COMPLETE', error_status='FAILED', not_found_exception=heat_exceptions.NotFound) try: self.stack.get() self.assertEqual('DELETE_COMPLETE', self.stack.stack_status) except heat_exceptions.NotFound: pass
# Analysis.py
#
# Created:  Unk,      , T. Lukaczyk
# Modified: Jan, 2016, M. Vegh

# ----------------------------------------------------------------------
#  Imports
# ----------------------------------------------------------------------

from SUAVE.Core import Data

# ----------------------------------------------------------------------
#  Analysis Data Class
# ----------------------------------------------------------------------

class Analysis(Data):
    """ SUAVE Data Class for an Analysis: holds the vehicle, the mission and the
        procedure (a map of mission segments to the configurations they are solved with).
    """

    def __defaults__(self):
        self.Vehicle   = None
        self.Mission   = None
        self.Procedure = AnalysisMap()

    def solve(self):
        procedure = self.Procedure  # was self.procedure, which is not an attribute of this class
        for segment, configuration in procedure.items():
            results = segment.solve(configuration)

    def __str__(self):
        args = ''
        args += self.dataname() + '\n'
        args += 'Vehicle = %s\n' % self.Vehicle.tag
        args += 'Mission = %s\n' % self.Mission.tag
        args += 'Procedure =\n'
        # iterate the same way as solve(): keys are segments, values are configurations
        for seg, con in self.Procedure.items():
            args += '  %s : %s\n' % (seg.tag, con.tag)
        return args


class AnalysisMap(Data):
    pass
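# --- Hedged usage sketch (not from SUAVE; assumes SUAVE's Data calls __defaults__ on
# construction, as it does elsewhere in SUAVE). `_Stub` stands in for SUAVE segment,
# configuration, vehicle and mission objects, which only need a .tag and, for segments,
# a .solve(configuration) method here.
class _Stub:
    def __init__(self, tag):
        self.tag = tag

    def solve(self, configuration):
        return Data()  # a real segment would return its results here


analysis = Analysis()
analysis.Vehicle = _Stub('my_vehicle')
analysis.Mission = _Stub('my_mission')
analysis.Procedure[_Stub('cruise_segment')] = _Stub('cruise_config')
analysis.solve()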
import Drop7.Position as Position def test_Is_Proper_Position__Legal_Case(score, max_score): """Function is_proper_position: given position is proper position.""" max_score.value += 1 try: assert Position.is_proper_position(4, (1, 1)) assert Position.is_proper_position(4, (4, 4)) assert Position.is_proper_position(4, (2, 3)) assert Position.is_proper_position(4, (4, 5)) # Overflow position score.value += 1 except: pass def test_Is_Proper_Position__Not_Tuple(score, max_score): """Function is_proper_position: given position is not a tuple.""" max_score.value += 1 try: assert not Position.is_proper_position(4, [1, 2]) score.value += 1 except: pass def test_Is_Proper_Position__Improper_Length(score, max_score): """Function is_proper_position: given position is a tuple of length different from 2.""" max_score.value += 1 try: assert not Position.is_proper_position(4, (1,)) score.value += 1 except: pass def test_Is_Proper_Position__Improper_Column(score, max_score): """Function is_proper_position: given position is a tuple of length 2 with improper column.""" max_score.value += 1 try: assert not Position.is_proper_position(4, (0, 3)) assert not Position.is_proper_position(4, (5, 3)) score.value += 1 except: pass def test_Is_Proper_Position__Improper_Row(score, max_score): """Function is_proper_position: given position is a tuple of length 2 with improper row.""" max_score.value += 1 try: assert not Position.is_proper_position(4, (3, 0)) assert not Position.is_proper_position(4, (3, 6)) score.value += 1 except: pass def test_Is_Overflow_Position__Single_Case(score, max_score): """Function is_overflow_position: single case.""" max_score.value += 1 try: assert not Position.is_overflow_position(4, (1, 1)) assert not Position.is_overflow_position(4, (4, 4)) assert Position.is_overflow_position(4, (4, 5)) score.value += 1 except: pass def test_Left__Not_Leftmost_Position(score, max_score): """Function left: non-leftmost position""" max_score.value += 1 try: assert Position.left(8, (2, 4)) == (1, 4) score.value += 1 except: pass def test_Left__Leftmost_Position(score, max_score): """Function up: leftmost position""" max_score.value += 1 try: assert Position.left(8, (1, 6)) == None score.value += 1 except: pass def test_Right__Not_Rightmost_Position(score, max_score): """Function right: non-rightmost position""" max_score.value += 1 try: assert Position.right(8, (2, 4)) == (3, 4) score.value += 1 except: pass def test_Right__Rightmost_Position(score, max_score): """Function right: rightmost position""" max_score.value += 1 try: assert Position.right(8, (8, 6)) == None score.value += 1 except: pass def test_Up__Not_Upmost_Position(score, max_score): """Function up: non-upmost position""" max_score.value += 1 try: assert Position.up(8, (2, 3)) == (2, 4) score.value += 1 except: pass def test_Up__Upmost_Position(score, max_score): """Function up: upmost position""" max_score.value += 1 try: assert Position.up(8, (2, 8)) == (2,9) score.value += 1 except: pass def test_Up__Overflow_Position(score, max_score): """Function up: overflow position""" max_score.value += 1 try: assert Position.up(8, (2, 9)) == None score.value += 1 except: pass def test_Down__Not_Bottom_Position(score, max_score): """Function down: not bottom position""" max_score.value += 1 try: assert Position.down(8, (2, 5)) == (2, 4) score.value += 1 except: pass def test_Down__Bottom_Position(score, max_score): """Function down: bottom position""" max_score.value += 1 try: assert Position.down(8, (2, 1)) == None score.value += 1 except: pass 
def test_Next_Not_End_Row(score, max_score): """Function next: position not at end of row""" max_score.value += 1 try: assert Position.next(4, (3, 1)) == (4, 1) assert Position.next(4, (2, 4)) == (3, 4) score.value += 1 except: pass def test_Next_End_Non_Top_Row(score, max_score): """Function next: position at end of row that is not top row""" max_score.value += 1 try: assert Position.next(4, (4, 2)) == (1, 3) score.value += 1 except: pass def test_Next_End_Top_Row(score, max_score): """Function next: position at end of top row""" max_score.value += 1 try: assert Position.next(4, (4, 5)) == None score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Empty_Collection(score, max_score): """Function get_all_adjacent_positions: empty collection.""" max_score.value += 1 try: assert Position.get_all_adjacent_positions(6, []) == set() score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Bottom_Left_Position(score, max_score): """Function get_all_adjacent_positions: bottom left position.""" max_score.value += 1 try: assert Position.get_all_adjacent_positions(6, [(1, 1)]) == set([(1, 2), (2, 1)]) score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Bottom_Right_Position(score, max_score): """Function get_all_adjacent_positions: bottom right position.""" max_score.value += 1 try: assert Position.get_all_adjacent_positions(6, [(6, 1)]) == set([(6, 2), (5, 1)]) score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Top_Left_Position(score, max_score): """Function get_all_adjacent_positions: top left position.""" max_score.value += 1 try: assert Position.get_all_adjacent_positions(6, [(1, 7)]) == set([(1, 6), (2, 7)]) score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Top_Right_Position(score, max_score): """Function get_all_adjacent_positions: top right position.""" max_score.value += 1 try: assert Position.get_all_adjacent_positions(6, [(6, 7)]) == set([(6, 6), (5, 7)]) score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Arbitrary_Position(score, max_score): """Function get_all_adjacent_positions: arbitrary position.""" max_score.value += 1 try: assert Position.get_all_adjacent_positions(6, [(2, 4)]) == set([(2, 3), (2, 5), (1, 4), (3, 4)]) score.value += 1 except: pass def test_Get_All_Adjacent_Positions__Collection_of_Positions(score, max_score): """Function get_all_adjacent_positions: collection of positions.""" max_score.value += 3 try: assert Position.get_all_adjacent_positions(6, [(2, 4), (3, 4), (3, 5)]) == \ set([(2, 3), (2, 5), (1, 4), (3, 4), (3, 3), (3, 5), (2, 4), (4, 4), (3, 4), (3, 6), (2, 5), (4, 5)]) score.value += 3 except: pass position_test_functions = \ { test_Is_Proper_Position__Not_Tuple, test_Is_Proper_Position__Improper_Length, test_Is_Proper_Position__Improper_Column, test_Is_Proper_Position__Improper_Row, test_Is_Proper_Position__Legal_Case, test_Is_Overflow_Position__Single_Case, test_Left__Not_Leftmost_Position, test_Left__Leftmost_Position, test_Right__Not_Rightmost_Position, test_Right__Rightmost_Position, test_Up__Not_Upmost_Position, test_Up__Upmost_Position, test_Up__Overflow_Position, test_Down__Not_Bottom_Position, test_Down__Bottom_Position, test_Next_Not_End_Row, test_Next_End_Non_Top_Row, test_Next_End_Top_Row, test_Get_All_Adjacent_Positions__Empty_Collection, test_Get_All_Adjacent_Positions__Bottom_Left_Position, test_Get_All_Adjacent_Positions__Bottom_Right_Position, test_Get_All_Adjacent_Positions__Top_Left_Position, test_Get_All_Adjacent_Positions__Top_Right_Position, 
test_Get_All_Adjacent_Positions__Arbitrary_Position, test_Get_All_Adjacent_Positions__Collection_of_Positions }
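# --- One possible implementation consistent with the tests above (a sketch, not the
# reference Drop7.Position module). Assumed model: a board has `dimension` columns,
# playable rows 1..dimension plus one overflow row at dimension + 1, and positions are
# (column, row) tuples. `next` deliberately mirrors the tested name even though it
# shadows the built-in.

def is_proper_position(dimension, position):
    if not isinstance(position, tuple) or len(position) != 2:
        return False
    column, row = position
    return 1 <= column <= dimension and 1 <= row <= dimension + 1

def is_overflow_position(dimension, position):
    return is_proper_position(dimension, position) and position[1] == dimension + 1

def left(dimension, position):
    column, row = position
    return (column - 1, row) if column > 1 else None

def right(dimension, position):
    column, row = position
    return (column + 1, row) if column < dimension else None

def up(dimension, position):
    column, row = position
    return (column, row + 1) if row < dimension + 1 else None

def down(dimension, position):
    column, row = position
    return (column, row - 1) if row > 1 else None

def next(dimension, position):
    column, row = position
    if column < dimension:
        return (column + 1, row)
    if row < dimension + 1:
        return (1, row + 1)
    return None

def get_all_adjacent_positions(dimension, positions):
    adjacent = set()
    for position in positions:
        for neighbour in (left(dimension, position), right(dimension, position),
                          up(dimension, position), down(dimension, position)):
            if neighbour is not None:
                adjacent.add(neighbour)
    return adjacent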
# coding: utf-8 import pytest if __name__ == "__main__": # import os # import sys # sys.path.append(os.path.realpath('..')) pytest.main([__file__]) from tests.import_check import ImportCheck def test_single(): # from ooobuild.lo.document.filter_options_request import FilterOptionsRequest # ns = "ooobuild.lo.document.filter_options_request.FilterOptionsRequest" ns = "ooobuild.lo.ucb.open_command_argument.OpenCommandArgument" imc = ImportCheck() assert imc.load_import(ns) == True
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from pathlib import Path from time import time from typing import Dict, List import geopandas as gpd import numpy as np from pandas import DataFrame, concat, read_csv, to_datetime from uszipcode import SearchEngine search = SearchEngine(simple_zipcode=True) def append_clean_data(df: DataFrame, unwanted_cols: List = []) -> DataFrame: """Feature engineering for datetime features""" df["datetime"] = to_datetime(df["date"], format="%m/%d/%Y %H:%M:%S %p") L = [ "month", "day", "hour", "dayofweek", "dayofyear", "weekofyear", "quarter", ] df = df.join( concat((getattr(df["datetime"].dt, i).rename(i) for i in L), axis=1) ) df["day_name"] = df["datetime"].dt.day_name() df["date_yymmdd"] = df["datetime"].dt.date df["is_weekend"] = False weekend_days = ["Saturday", "Sunday"] df.loc[df["day_name"].isin(weekend_days), "is_weekend"] = True df["is_dark"] = True df.loc[df["hour"].isin(list(range(8, 18))), "is_dark"] = False df = df.drop_duplicates() if unwanted_cols: df.drop(unwanted_cols, inplace=True, axis=1) df = df.dropna(how="any") return df def load_merge_slice_data( dtypes_dict: Dict, file_paths: List, years_wanted: List, months_wanted: List = [1, 2, 3], cols_to_drop=["abc"], ) -> DataFrame: """ Merge files from list of filepaths, drop unwanted cols and slice by datetime attributes """ df = concat( [read_csv(f, dtype=dtypes_dict) for f in file_paths], ignore_index=True ) df = df.drop(cols_to_drop, axis=1) df.columns = map(str.lower, df.columns) df.columns = df.columns.str.replace(" ", "_") mask1 = df["year"].isin(years_wanted) mask2 = to_datetime(df["date"]).dt.month.isin(months_wanted) df = df[(mask1) & (mask2)] return df def drop_non_zero_rows(df: DataFrame, col_rows_to_drop: str) -> DataFrame: """ Drop rows that are not all zeros """ df = df[ df[col_rows_to_drop] != 0 ] # assumes zeros occur ONLY on same row, for each column return df def get_population_vectorize(lat: float, lng: float) -> float: """ Get total population of 10 closest points within 1 mile of latitude-longitude co-ordinate """ result = search.by_coordinates(lat, lng, radius=1, returns=10) population = sum([r.population for r in result]) return population def get_housing_units(lat: float, lng: float) -> float: """ Get total number of housing units of 10 closest points within 1 mile of latitude-longitude co-ordinate """ result = search.by_coordinates(lat, lng, radius=1, returns=10) housing_units = sum([r.housing_units for r in result]) return housing_units def get_median_household_value(lat: float, lng: float) -> float: """ Get median household value of 10 closest points within 1 mile of latitude-longitude co-ordinate """ result = search.by_coordinates(lat, lng, radius=1, returns=10) home_value = sum( [ r.median_home_value if r.median_home_value is not None else 0 for r in result ] ) return home_value def get_median_household_income(lat: float, lng: float) -> float: """ Get median household income of 10 closest points within 1 mile of latitude-longitude co-ordinate """ result = search.by_coordinates(lat, lng, radius=1, returns=10) home_income = sum( [ r.median_household_income if r.median_household_income is not None else 0 for r in result ] ) return home_income def get_occupied_housing_values(lat: float, lng: float) -> float: """ Get total number of occupied housing units of 10 closest points within 1 mile of latitude-longitude co-ordinate """ result = search.by_coordinates(lat, lng, radius=1, returns=10) occ_homes = sum([r.occupied_housing_units for r in result]) return occ_homes def 
get_zipcode(lat: float, lng: float) -> int: """ Get total number of zipcodes for 10 closest points within 1 mile of latitude-longitude co-ordinate """ result = search.by_coordinates(lat, lng, radius=1, returns=10) zip_codes = np.count_nonzero( np.asarray( [r.zipcode if r.zipcode is not None else None for r in result] ) ) return zip_codes def append_demographic_data(df: DataFrame) -> List: d = { "total_population": [get_population_vectorize], "housing_units": [get_housing_units], "median_household_value": [get_median_household_value], "median_household_income": [get_median_household_income], "occupied_housing_values": [get_occupied_housing_values], "zipcode": [get_zipcode], } for k, v in d.items(): start_time = time() df[k] = np.vectorize(v[0])(lat=df["latitude"], lng=df["longitude"]) minutes, seconds = divmod(time() - start_time, 60) v += [minutes, seconds] df_execution_times = DataFrame.from_dict(d, orient="index").reset_index() df_execution_times.columns = ["feature", "function", "minutes", "seconds"] return df_execution_times, d def merge_with_weather_data( df_data: DataFrame, weather_data_file_path: Path, weather_data_date_col: str, wanted_weather_cols: List, merge_data_on: str = "date_yymmdd", merge_weather_data_on: str = "date_yymmdd", ) -> DataFrame: df_weather = read_csv(weather_data_file_path) df_weather[merge_weather_data_on] = to_datetime( df_weather[weather_data_date_col] ).dt.date df = df_data.merge( df_weather[wanted_weather_cols], left_on=merge_data_on, right_on=merge_weather_data_on, ) return df def write_data_to_csv( df: DataFrame, joined_data_path: Path, write_index: bool = False ) -> None: """ Write a DataFrame to a csv file """ df.to_csv(joined_data_path, index=write_index) def explode(path_to_file: Path) -> gpd.GeoDataFrame: """ Explodes a geodataframe Will explode muti-part geometries into single geometries. Original index is stored in column level_0 and zero-based count of geometries per multi- geometry is stored in level_1 Inputs ------ Args: gdf (gpd.GeoDataFrame) : input geodataframe with multi-geometries Returns: gdf (gpd.GeoDataFrame) : exploded geodataframe with a new index and two new columns: level_0 and level_1 SOURCE: https://gist.github.com/mhweber/cf36bb4e09df9deee5eb54dc6be74d26 #gistcomment-2353309 """ gdf2 = gpd.read_file(str(path_to_file)) gs = gdf2.explode() gdf3 = gs.reset_index().rename(columns={0: "geometry"}) gdf_out = gdf3.merge( gdf2.drop("geometry", axis=1), left_on="level_0", right_index=True ) gdf_index = ["level_0", "level_1"] gdf_out = gdf_out.set_index(gdf_index).set_geometry("geometry") gdf_out.crs = gdf2.crs gdf_out["geomlist"] = ( gdf_out["geometry"] .apply(lambda x: list(x.exterior.coords)) .reset_index()["geometry"] .values ) return gdf_out def point_inside_polygon(lat: float, lng: float, poly) -> bool: """Check if a point is inside a GeoDataFrame POLYGON""" n = len(poly) inside = False p1x, p1y = poly[0] for i in range(n + 1): p2x, p2y = poly[i % n] if lat > min(p1y, p2y): if lat <= max(p1y, p2y): if lng <= max(p1x, p2x): if p1y != p2y: xinters = (lat - p1y) * (p2x - p1x) / (p2y - p1y) + p1x if p1x == p2x or lng <= xinters: inside = not inside p1x, p1y = p2x, p2y return inside
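# --- Quick sanity check of the ray-casting helper above (the unit-square polygon is made up) ---
if __name__ == "__main__":
    square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    print(point_inside_polygon(0.5, 0.5, square))  # True: the point lies inside the square
    print(point_inside_polygon(0.5, 1.5, square))  # False: the point lies outside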
from typing import List class HSPException(Exception): def __init__(self, message: str, errors: List[str] = None): self.errors = errors or [] super().__init__(message)
import torch import torch.nn as nn import torch.nn.functional as F from torch.distributions.normal import Normal class Conv3dBlock(nn.Module): def __init__(self, inc, outc, ksize=3, stride=1, pad=1): super(Conv3dBlock, self).__init__() self.conv = nn.Conv3d(in_channels=inc, out_channels=outc, kernel_size=ksize, stride=stride, padding=pad, bias=False) self.bn = nn.BatchNorm3d(outc) self.act = nn.LeakyReLU(inplace=True, negative_slope=0.2) nn.init.xavier_uniform_(self.conv.weight) def forward(self, x): return self.act(self.bn(self.conv(x))) class Deconv3d(nn.Module): def __init__(self, inc, outc, output_size, ksize=2, stride=2, pad=0, dilation=1): super(Deconv3d, self).__init__() self.output_size = output_size self.pad = pad self.deconv = nn.ConvTranspose3d( in_channels=inc, out_channels=outc, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, output_padding=0) self.bn = nn.BatchNorm3d(outc) self.act = nn.LeakyReLU(inplace=True, negative_slope=0.2) self.upsample_layer = nn.Upsample(size=output_size, mode='trilinear', align_corners=True) nn.init.xavier_uniform_(self.deconv.weight) def forward(self, x): out = self.act(self.bn(self.deconv(x))) if out.shape[-3:] != self.output_size: out = self.upsample_layer(out) return out class AdaptiveUpsample3d(nn.Module): def __init__(self, inc, outc, output_size): super(AdaptiveUpsample3d, self).__init__() self.upsample_layer = nn.Upsample(size=output_size, mode='trilinear', align_corners=True) self.conv = nn.Conv3d(inc, outc, kernel_size=1, stride=1, padding=0) nn.init.xavier_uniform_(self.conv.weight) def forward(self, x): output = self.upsample_layer(x) output = self.conv(output) return output class DDFFusion(nn.Module): def __init__(self, inc, out_shape): super(DDFFusion, self).__init__() self.conv = nn.Conv3d(in_channels=inc, out_channels=3, kernel_size=3, stride=1, padding=1) self.out_shape = out_shape self.conv.weight = nn.Parameter(Normal(0, 1e-5).sample(self.conv.weight.shape)) # nn.init.xavier_uniform(self.conv.weight) self.conv.bias = nn.Parameter(torch.zeros(self.conv.bias.shape)) def forward(self, x): output = self.conv(x) if output.shape[-3:] != self.out_shape: output = F.interpolate(output, size=self.out_shape, mode='trilinear', align_corners=True) return output class ResidualBlock(nn.Module): def __init__(self, channels, ksize=3, stride=1, pad=1): super(ResidualBlock, self).__init__() self.conv_block1 = Conv3dBlock(inc=channels, outc=channels, ksize=ksize, stride=stride, pad=pad) self.conv_block2 = Conv3dBlock(inc=channels, outc=channels, ksize=ksize, stride=stride, pad=pad) def forward(self, x): output = self.conv_block1(x) output = self.conv_block2(output) return output + x class DownsampleBlock(nn.Module): def __init__(self, inc, outc, ksize=3, stride=1, pad=1): super(DownsampleBlock, self).__init__() self.conv = Conv3dBlock(inc=inc, outc=outc, ksize=ksize, stride=stride, pad=pad) self.resblock = ResidualBlock(channels=outc, ksize=ksize, stride=stride, pad=pad) self.max_pool = nn.MaxPool3d(kernel_size=2, stride=2) def forward(self, x): f_jc = self.conv(x) f_down = self.resblock(f_jc) f_down = self.max_pool(f_down) return f_down, f_jc class UpSampleBlock(nn.Module): def __init__(self, inc, outc, output_size): super(UpSampleBlock, self).__init__() self.deconv = Deconv3d(inc=inc, outc=outc, output_size=output_size) self.adpt_up = AdaptiveUpsample3d(inc=inc, outc=outc, output_size=output_size) self.conv1 = Conv3dBlock(inc=outc, outc=outc) self.conv2 = Conv3dBlock(inc=outc, outc=outc) def forward(self, x): jc_feature, 
ori_feature = x[0], x[1] tmp = self.deconv(ori_feature) + self.adpt_up(ori_feature) res_feature = tmp + jc_feature tmp = self.conv1(tmp) tmp = self.conv2(tmp) return res_feature + tmp
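# --- Hedged shape check (not part of the model code above; the sizes are arbitrary) ---
# Runs a dummy volume through one downsampling block and a matching upsampling block.
if __name__ == "__main__":
    x = torch.randn(1, 1, 16, 16, 16)                    # (batch, channels, D, H, W)
    down = DownsampleBlock(inc=1, outc=8)
    up = UpSampleBlock(inc=8, outc=8, output_size=(16, 16, 16))
    f_down, f_jc = down(x)                               # (1, 8, 8, 8, 8), (1, 8, 16, 16, 16)
    out = up([f_jc, f_down])                             # skip feature + upsampled feature
    print(f_down.shape, f_jc.shape, out.shape)           # out: (1, 8, 16, 16, 16)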
#!/usr/bin/env python from __future__ import print_function import copy import glob import json import os from genutil import StringConstructor from pcmdi_metrics.mjo.lib import dict_merge def main(): mips = ["cmip5", "cmip6"] # mips = ["cmip5"] # mips = ["cmip6"] exps = ["historical"] pmprdir = "/p/user_pub/pmp/pmp_results/pmp_v1.1.2" # pmprdir = "/work/lee1043/imsi/result_test" for mip in mips: for exp in exps: case_id = find_latest(pmprdir, mip, exp) print("mip, exp, case_id:", mip, exp, case_id) merge_jsons(mip, exp, case_id, pmprdir) def merge_jsons(mip, exp, case_id, pmprdir): json_file_dir_template = os.path.join( pmprdir, "%(output_type)", "mjo", "%(mip)", "%(exp)", "%(case_id)" ) json_file_dir_template = StringConstructor(json_file_dir_template) json_file_dir = json_file_dir_template( output_type="metrics_results", mip=mip, exp=exp, case_id=case_id ) print("json_file_dir:", json_file_dir) json_file_template = ( "mjo_stat_%(mip)_%(exp)_da_atm_%(model)_%(realization)_1985-2004" ) json_file_template = StringConstructor(json_file_template) # Search for individual JSONs json_files = sorted( glob.glob( os.path.join( json_file_dir, json_file_template( mip=mip, exp=exp, case_id=case_id, model="*", realization="*" ) + ".json", ) ) ) # Remove diveDown JSONs and previously generated merged JSONs if included json_files_revised = copy.copy(json_files) for j, json_file in enumerate(json_files): filename_component = json_file.split("/")[-1].split(".")[0].split("_") if "diveDown" in filename_component: json_files_revised.remove(json_file) elif "allModels" in filename_component: json_files_revised.remove(json_file) elif "allRuns" in filename_component: json_files_revised.remove(json_file) # Load individual JSON and merge to one big dictionary for j, json_file in enumerate(json_files_revised): print(j, json_file) f = open(json_file) dict_tmp = json.loads(f.read()) if j == 0: dict_final = dict_tmp.copy() else: dict_merge(dict_final, dict_tmp) f.close() # Dump final dictionary to JSON final_json_filename = ( json_file_template( mip=mip, exp=exp, case_id=case_id, model="allModels", realization="allRuns" ) + ".json" ) final_json_file = os.path.join(json_file_dir, final_json_filename) print("final_json_filename:", final_json_filename) with open(final_json_file, "w") as fp: json.dump(dict_final, fp, sort_keys=True, indent=4) print("Done: check ", final_json_file) def find_latest(pmprdir, mip, exp): versions = sorted( [ r.split("/")[-1] for r in glob.glob( os.path.join(pmprdir, "metrics_results", "mjo", mip, exp, "v????????") ) ] ) latest_version = versions[-1] return latest_version if __name__ == "__main__": main()
class Matrix:
    numberOfRows = 0
    numberOfColumns = 0
    content = [[0]]

    # Initializes the content variable with zeros
    def __init__(self, rows, columns):
        self.numberOfRows = rows
        self.numberOfColumns = columns
        self.content = [[0 for x in range(columns)] for x in range(rows)]

    def changeElement(self, rowNumber, columnNumber, value):
        self.content[rowNumber - 1][columnNumber - 1] = value

    def getTransposedMatrix(self):
        newMatrix = Matrix(self.numberOfColumns, self.numberOfRows)
        newMatrix.content = list(map(list, zip(*(self.content))))
        return newMatrix

    def getRow(self, rowNumber):
        return self.content[rowNumber - 1]

    # Takes a list and replaces a single row in the matrix with the contents of the list.
    # The function automatically truncates or expands lists that aren't the correct size.
    def setRow(self, rowNumber, newRow):
        if len(newRow) == self.numberOfColumns:
            self.content[rowNumber - 1] = newRow
        elif len(newRow) > self.numberOfColumns:
            newRow = newRow[0:self.numberOfColumns]
            self.content[rowNumber - 1] = newRow
        else:
            while len(newRow) < self.numberOfColumns:
                newRow.append(0)
            self.content[rowNumber - 1] = newRow

    def getColumn(self, columnNumber):
        transposedMatrix = list(map(list, zip(*(self.content))))
        return transposedMatrix[columnNumber - 1]

    # Works the same way as the setRow() function except it transposes the matrix to
    # perform the substitutions and then transposes it back.
    # A column has numberOfRows entries, so the new column is truncated or padded to that length.
    def setColumn(self, columnNumber, newColumn):
        transposedMatrix = self.getTransposedMatrix()
        if len(newColumn) == self.numberOfRows:
            transposedMatrix.content[columnNumber - 1] = newColumn
        elif len(newColumn) > self.numberOfRows:
            newColumn = newColumn[0:self.numberOfRows]
            transposedMatrix.content[columnNumber - 1] = newColumn
        else:
            while len(newColumn) < self.numberOfRows:
                newColumn.append(0)
            transposedMatrix.content[columnNumber - 1] = newColumn
        self.content = transposedMatrix.getTransposedMatrix().content

    def scalarMultiplication(self, scalar):
        for i in range(self.numberOfRows):
            for j in range(self.numberOfColumns):
                self.content[i][j] = scalar * self.content[i][j]

    def getMatrix2DArray(self):
        return self.content
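# --- Small usage sketch of the Matrix class above ---
m = Matrix(2, 3)
m.changeElement(1, 2, 5)
m.setRow(2, [7, 8, 9])
print(m.getMatrix2DArray())                          # [[0, 5, 0], [7, 8, 9]]
print(m.getTransposedMatrix().getMatrix2DArray())    # [[0, 7], [5, 8], [0, 9]]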
from django.contrib import admin from mainapp.models import Category, Key, Arrangement, Order, Support class ArrangementAdmin(admin.ModelAdmin): search_fields = ('category', 'key') list_filter = ('category', 'key') admin.site.register(Category) admin.site.register(Key) admin.site.register(Arrangement, ArrangementAdmin) admin.site.register(Order) admin.site.register(Support)
# coding: utf8 from .settings_prod import *
from pymemcache.client.autodiscovery import AutodiscoveryClient from .test_client import MockSocket import unittest import pytest class MockSocketModule(object): def __init__(self, recv_bufs): self.sock = MockSocket(recv_bufs) self.AF_INET = 0 self.SOCK_STREAM = 0 def socket(self, family, type): return self.sock #def __getattr__(self, name): # return getattr(socket, name) @pytest.mark.unit() class TestAutodiscoveryClient(unittest.TestCase): def make_client(self, mock_socket_values, **kwargs): kwargs.update({ 'socket_module': MockSocketModule(list(mock_socket_values)) }) client = AutodiscoveryClient(endpoint=("127.0.0.1", 11211), autodiscovery=False, **kwargs) return client def test_cluster_one_node(self): # autodiscovery returns one node ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n' ]) assert len(client.clients) == 1 assert client.clients['127.0.0.1:11211'] is not None def test_cluster_two_nodes(self): # autodiscovery returns two nodes ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211 localhost|127.0.0.1|11212\n\r\nEND\r\n' ]) assert len(client.clients) == 2 assert client.clients['127.0.0.1:11211'] is not None assert client.clients['127.0.0.1:11212'] is not None def test_cluster_no_changes(self): # autodiscovery returns one node and then the same ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n', b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n', ]) assert len(client.clients) == 1 assert client.clients['127.0.0.1:11211'] is not None client.check_cluster() assert len(client.clients) == 1 assert client.clients['127.0.0.1:11211'] is not None def test_cluster_add_node(self): # autodiscovery returns one and then two nodes ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n', b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n2\nlocalhost|127.0.0.1|11211 localhost|127.0.0.1|11212\n\r\nEND\r\n' ]) assert len(client.clients) == 1 assert client.clients['127.0.0.1:11211'] is not None client.check_cluster() assert len(client.clients) == 2 assert client.clients['127.0.0.1:11211'] is not None assert client.clients['127.0.0.1:11212'] is not None def test_cluster_remove_node(self): # autodiscovery returns two and then one nodes ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211 localhost|127.0.0.1|11212\n\r\nEND\r\n', b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n2\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n' ]) assert len(client.clients) == 2 assert client.clients['127.0.0.1:11211'] is not None assert client.clients['127.0.0.1:11212'] is not None client.check_cluster() assert len(client.clients) == 1 assert client.clients['127.0.0.1:11211'] is not None def test_cluster_key_add_node(self): # test key consistency after adding nodes ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n', b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n2\nlocalhost|127.0.0.1|11211 localhost|127.0.0.1|11212\n\r\nEND\r\n' ]) assert client.hasher.get_node("b") == client.hasher.get_node("z") client.check_cluster() assert client.hasher.get_node("b") != client.hasher.get_node("z") def test_cluster_key_remove_node(self): # test key 
consistency after removing nodes ########### client = self.make_client([ b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n1\nlocalhost|127.0.0.1|11211 localhost|127.0.0.1|11212\n\r\nEND\r\n', b'VERSION 1.4.20\r\n', b'CONFIG cluster 0 147\r\n2\nlocalhost|127.0.0.1|11211\n\r\nEND\r\n' ]) assert client.hasher.get_node("b") != client.hasher.get_node("z") client.check_cluster() assert client.hasher.get_node("b") == client.hasher.get_node("z")