repo_name
stringlengths
8
130
hexsha
sequence
file_path
sequence
code
sequence
apis
sequence
msadang/blazingsql
[ "5fe3e418dbee4a3961998b0e25ec81100a1a1490" ]
[ "tests/BlazingSQLTest/Runner/runTest.py" ]
[ "# Cast column to f64 before convert it to pandas\n# This is a hack, use the assert_equal comparator when nulls is\n# fully supported on cudf.sort_values\nimport json\nimport logging\nimport os\nimport re\nimport time\n\nimport blazingsql\nfrom blazingsql import DataType\n\n# import git\nimport numpy as np\nimport pandas as pd\n\nfrom BlazingLogging import loggingHandler as lhandler\nfrom Configuration import ExecutionMode\nfrom Configuration import Settings as Settings\nfrom DataBase import createSchema as cs\n\nif ((Settings.execution_mode == ExecutionMode.FULL and\n Settings.compare_res == \"true\") or\n Settings.execution_mode == ExecutionMode.GENERATOR):\n print(Settings.execution_mode)\n print(Settings.compare_res)\n from pydrill.client import PyDrill\n from pyspark.sql.session import SparkSession\n\nclass Result:\n def __init__(self, columns, resultSet, resultBlz):\n self.columns = columns\n self.resultSet = resultSet\n self.resultBlz = resultBlz\n\n\nname = \"blzlogging\"\n\nHANDLER = lhandler.logging_handler()\n\n\nclass loggerblz:\n def __init__(self, query, error, totaltime):\n self.query = query\n self.error = error\n self.totaltime = totaltime\n\n\nclass result:\n def __init__(self, res_execution, error):\n self.res_execution = res_execution\n self.error = error\n\n\ndef logginghelper(name):\n # logging.basicConfig(filename='example.txt',level=logging.DEBUG)\n logging._defaultFormatter = logging.Formatter()\n logger = logging.getLogger(name)\n logger.handlers = []\n logger.setLevel(logging.DEBUG)\n logger.addHandler(HANDLER)\n return logger\n\n\ndef loggingClose(name):\n HANDLER.log = []\n\n\ndef upcast_to_float(df):\n for name in df.columns:\n if np.issubdtype(df[name].dtype, np.bool_):\n df[name] = df[name].astype(np.float32)\n elif np.issubdtype(df[name].dtype, np.integer):\n df[name] = df[name].astype(np.float64)\n return df\n\n\ndef to_pandas_f64_engine(df, expected_types_list):\n count = 0\n for col in df.columns:\n if count >= 
len(expected_types_list):\n break\n\n if expected_types_list[count] != np.dtype(object):\n if df.shape[0] > 0:\n if not np.issubdtype(df[col].dtype, np.number) and not np.issubdtype(\n df[col].dtype, np.datetime64\n ):\n if np.issubdtype(expected_types_list[count], np.bool_):\n df[col] = (\n df[col].map({\"true\": 1.0, \"false\": 0.0}).astype(np.float32)\n )\n elif np.issubdtype(expected_types_list[count], np.datetime64):\n df[col] = df[col].astype(expected_types_list[count])\n else:\n df[col] = pd.to_numeric(df[col], errors=\"coerce\")\n count = count + 1\n\n return df\n\n\ndef get_null_constants(df):\n null_values = {}\n for col, dtype in df.dtypes.to_dict().items():\n if np.issubdtype(dtype, np.datetime64):\n null_values[col] = np.datetime64(\"nat\")\n elif np.issubdtype(dtype, np.number):\n null_values[col] = np.nan\n return null_values\n\n\ndef compare_results(pdf1, pdf2, acceptable_difference, use_percentage, engine):\n np.warnings.filterwarnings(\"ignore\")\n\n if pdf1.size == 0 and pdf2.size == 0:\n return \"Success\"\n\n msg = \"\"\n if not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n elif engine==\"drill\":\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n\n msg = \"\"\n if not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n elif engine==\"drill\":\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n\n if pdf1.shape[0] == pdf2.shape[0]:\n if pdf1.shape[1] == pdf2.shape[1]:\n\n for name in pdf1.columns:\n if pdf1[name].dtype == np.object:\n pdf1[name] = pdf1[name].astype('string')\n\n for name in pdf2.columns:\n if pdf2[name].dtype == np.object:\n pdf2[name] = pdf2[name].astype('string')\n\n # Removing indexes, because those are considered when\n # comparing with equals()\n pdf1.reset_index(drop=True, inplace=True)\n pdf2.reset_index(drop=True, inplace=True)\n\n # Make the column labels equal as equals() also compare labels\n 
orig_pdf2_labels = pdf2.columns.to_list()\n pdf2.columns = pdf1.columns.to_list()\n\n exac_comp = pdf1.select_dtypes(exclude=np.inexact).equals(\n pdf2.select_dtypes(exclude=np.inexact)\n )\n\n # Restore labels\n pdf2.columns = orig_pdf2_labels\n\n tmp_pdf1 = pdf1.select_dtypes(include=np.inexact)\n tmp_pdf2 = pdf2.select_dtypes(include=np.inexact)\n\n\n if use_percentage:\n relative_tolerance = acceptable_difference\n absolute_tolerance = 0\n else:\n relative_tolerance = 0\n absolute_tolerance = acceptable_difference\n # np.allclose follows this formula:\n # absolute(a - b) <= (absolute_tolerance + relative_tolerance * absolute(b))\n\n res = np.all(exac_comp) and np.allclose(\n tmp_pdf1.values, tmp_pdf2.values, relative_tolerance,\n absolute_tolerance, equal_nan=True\n )\n if res:\n return \"Success\"\n else:\n return \"Fail: Different values\"\n else:\n return (\n \"Fail: Different number of columns blzSQLresult: \"\n + str(pdf1.shape[1])\n + \" \"\n + msg\n + \" result: \"\n + str(pdf2.shape[1])\n )\n else:\n return (\n \"Fail: Different number of rows blzSQLresult: \"\n + str(pdf1.shape[0])\n + \" \"\n + msg\n + \" result: \"\n + str(pdf2.shape[0])\n )\n\n\ndef begins_with(col1, col2, exp):\n return col1.startswith(exp) or col2.startswith(exp)\n\n\ndef compare_column_names(pdf1, pdf2):\n if len(pdf1.columns) != len(pdf2.columns):\n if pdf1.values.size == 0 and pdf2.values.size == 0:\n return True\n print(\"Different set of columns\")\n return False\n for blzCol, drillCol in zip(\n pdf1.columns.values.tolist(), pdf2.columns.values.tolist()\n ):\n if blzCol != drillCol:\n if (\n begins_with(drillCol, blzCol, \"EXPR\") is False\n and begins_with(drillCol, blzCol, \"count(\") is False\n ):\n print(\"Different columns\")\n return False\n return True\n\n# NOTE kharoly percy william: NEVER CHANGE THE ORDER of these\n# lines (the logger logic depends that we log first queryType and then queryId\n# WARNING DO NOT CHANGE THE CALL ORDER IN THIS FUCTION!\n\n\ndef 
get_Branch():\n branch = blazingsql.__branch_name__\n return branch\n\n\ndef get_CommitHash():\n commit = blazingsql.__version__\n return commit\n\n\ndef get_QueryId(input_type, test_name, test_id):\n query_id = (\n str(input_type).upper()\n + \"-\"\n + str(get_codTest(test_name)).upper()\n + \"-\"\n + str(test_id)\n )\n return query_id\n\n\ndef get_resultId(resultComparisson):\n result_id = 1\n if resultComparisson != \"Success\":\n result_id = 0\n return result_id\n\n\ndef get_codTest(test_name):\n switcher = {\n \"Aggregations without group by\": \"AGGWOGRBY\",\n \"Coalesce\": \"COALESCE\",\n \"Column Basis\": \"COLBAS\",\n \"Bindable Alias\": \"BALIAS\",\n \"Boolean\": \"BOOL\",\n \"Case\": \"CASE\",\n \"Cast\": \"CAST\",\n \"Common Table Expressions\": \"COMTABLEX\",\n \"Concat\": \"CONCAT\",\n \"Count Distinct\": \"COUNTD\",\n \"Count without group by\": \"COUNTWOGRBY\",\n \"Cross join\": \"CROSSJOIN\",\n \"Date\": \"DATE\",\n \"DayOfWeek\": \"DAYOFWEEK\",\n \"Dir\": \"DIR\",\n \"File System Google Storage\": \"FSGS\",\n \"Hdfs FileSystem\": \"FSHDFS\",\n \"Hive FileSystem\": \"FSHIVE\",\n \"File System Local\": \"FSLOCAL\",\n \"File System S3\": \"FSS3\",\n \"Full outer join\": \"FOUTJOIN\",\n \"Group by\": \"GROUPBY\",\n \"Group by without aggregations\": \"GRBYWOAGG\",\n \"Inner join\": \"INNERJOIN\",\n \"Left outer join\": \"LOUTJOIN\",\n \"Like\": \"LIKE\",\n \"Literal\": \"LITERAL\",\n \"Nested Queries\": \"NESTEDQ\",\n \"Non-EquiJoin Queries\": \"NEQUIJOIN\",\n \"Order by\": \"ORDERBY\",\n \"Predicates With Nulls\": \"PREDWNULLS\",\n \"Round\": \"ROUND\",\n \"Replace\": \"REPLACE\",\n \"Simple Distribution From Local\": \"SIMPLEDIST\",\n \"Smiles Test\": \"SMILES\",\n \"Substring\": \"SUBSTRING\",\n \"Tables from Pandas\": \"TBLPANDAS\",\n \"Timestampdiff\": \"TIMESTAMPD\",\n \"Timestamp\": \"TIMESTAMP\",\n \"To_timestamp\": \"TO_TIMESTAMP\",\n \"TPCH Queries\": \"TPCH\",\n \"Config Options\": \"TPCH\", # we want the same outputs as the tpch test\n 
\"Unary ops\": \"UNARYOPS\",\n \"Unify Tables\": \"UNIFYTBL\",\n \"Union\": \"UNION\",\n \"Limit\": \"LIMIT\",\n \"Where clause\": \"WHERE\",\n \"Wild Card\": \"WILDCARD\",\n \"Simple String\": \"SSTRING\",\n \"String case\": \"STRINGCASE\",\n \"Message Validation\": \"MESSAGEVAL\"\n }\n\n return switcher.get(test_name)\n\ndef print_fixed_log(\n logger,\n test_name,\n input_type,\n test_id,\n sql,\n resultComparisson,\n error_message,\n load_time,\n engine_time,\n total_time,\n):\n commitHash = get_CommitHash()\n branchName = get_Branch()\n # dateNow=datetime.now()\n inputType = cs.get_extension(input_type)\n\n logger.info(get_QueryId(inputType, test_name, test_id)) # QueryID\n logger.info(Settings.dateNow) # TimeStamp\n logger.info(test_name) # TestGroup\n logger.info(inputType) # InputType\n logger.info(sql) # Query\n logger.info(get_resultId(resultComparisson)) # Result\n logger.info(error_message) # Error\n logger.info(branchName) # PR\n logger.info(commitHash) # CommitHash\n logger.info(Settings.data[\"RunSettings\"][\"nRals\"])\n logger.info(Settings.data[\"RunSettings\"][\"nGPUs\"])\n logger.info(Settings.data[\"TestSettings\"][\"dataDirectory\"])\n logger.info(test_id)\n logger.info(load_time)\n logger.info(engine_time)\n logger.info(total_time)\n\n\ndef print_query_results(\n sql,\n queryId,\n queryType,\n pdf1,\n pdf2,\n resultgdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n):\n if print_result:\n print(\"#BLZ:\")\n print(pdf1)\n if not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n print(\"#DRILL:\")\n else:\n print(\"#PYSPARK:\")\n print(pdf2)\n else:\n if engine==\"drill\":\n print(\"#DRILL:\")\n else:\n print(\"#PYSPARK:\")\n data_type = cs.get_extension(input_type)\n print(str(queryId) + \" Test \" + queryType + \" - \" + data_type)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n\n error_message = \"\"\n stringResult = \"\"\n\n compareResults = 
True\n if \"compare_results\" in Settings.data[\"RunSettings\"]:\n compareResults = Settings.data[\"RunSettings\"][\"compare_results\"]\n\n if compareResults:\n columnNamesComparison = compare_column_names(pdf1, pdf2)\n if columnNamesComparison is not True:\n print(\"Columns:\")\n print(pdf1.columns)\n print(pdf2.columns)\n\n error_message = \"Column names are not the same\"\n print(\"ERROR:\")\n print(error_message)\n\n resultComparisson = compare_results(\n pdf1, pdf2, acceptable_difference, use_percentage, engine\n )\n if resultComparisson != \"Success\":\n error_message = resultComparisson[6:]\n print(\"ERROR:\")\n print(error_message)\n\n stringResult = resultComparisson\n if resultComparisson != \"Success\" or columnNamesComparison is False:\n stringResult = \"Fail\"\n else:\n stringResult = \"Success\"\n print(stringResult)\n\n print(\"TOTAL TIME: \")\n print(total_time)\n print(\"CRASHED NODES: \")\n # print(resultgdf.n_crashed_nodes)\n print(\"TOTAL NODES: \")\n # print(resultgdf.total_nodes)\n print(\"===================================================\")\n\n logger = logginghelper(name)\n\n # TODO percy kharoly bindings we need to get the number from internal api\n # print_fixed_log(logger, queryType, queryId, sql, stringResult,\n # error_message, 1, 1, 2)\n print_fixed_log(\n logger,\n queryType,\n input_type,\n queryId,\n sql,\n stringResult,\n error_message,\n load_time,\n engine_time,\n total_time,\n )\n\ndef print_query_results2(sql, queryId, input_type, queryType, error_message, message_validation):\n print(queryId)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n result = validate_messages(error_message, message_validation)\n print(result)\n print(\"ERROR:\")\n if result==\"Fail\":\n print(error_message)\n else:\n error_message=\"\"\n print(\"CALCITE TIME: \")\n print(\"-\")\n print(\"RAL TIME: \")\n print(\"-\")\n print(\"EXECUTION TIME: \")\n print(\"-\")\n\n print(\"===================================================\")\n\n logger = 
logginghelper(name)\n\n print_fixed_log(\n logger, queryType, input_type, queryId, sql, result, error_message, None, None, None\n )\n\ndef print_query_results_performance(sql, queryId, queryType, resultgdf):\n print(queryId)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n resultComparisson = \"Success\"\n print(\"CALCITE TIME: \")\n print(resultgdf.calciteTime)\n print(\"RAL TIME: \")\n print(resultgdf.ralTime)\n print(\"EXECUTION TIME: \")\n print(resultgdf.totalTime)\n\n print(\"===================================================\")\n\n logger = logginghelper(name)\n\n print_fixed_log(\n logger,\n queryType,\n queryId,\n sql,\n resultComparisson,\n \" \",\n resultgdf.calciteTime,\n resultgdf.ralTime,\n resultgdf.totalTime,\n )\n\n\ndef print_query_results_dist(\n sql,\n queryId,\n queryType,\n pdf1,\n pdf2,\n resultgdf,\n acceptable_difference,\n use_percentage,\n print_result,\n):\n if print_result:\n print(\"#BLZ:\")\n print(pdf1)\n print(\"#DRILL:\")\n print(pdf2)\n print(queryId)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n resultComparisson = compare_results(\n pdf1.values, pdf2.values, acceptable_difference, use_percentage\n )\n error_message = \"\"\n if resultComparisson != \"Success\":\n error_message = resultComparisson[6:]\n resultComparisson = \"Fail\"\n print(resultComparisson)\n print(\"ERROR:\")\n print(error_message)\n else:\n print(resultComparisson)\n print(\"CALCITE TIME: \")\n print(resultgdf.calciteTime)\n print(\"RAL TIME: \")\n print(resultgdf.ralTime)\n print(\"EXECUTION TIME: \")\n print(resultgdf.totalTime)\n\n print(\"===================================================\")\n\n logger = logginghelper(name)\n\n print_fixed_log(\n logger,\n queryType,\n queryId,\n sql,\n resultComparisson,\n error_message,\n None,\n None,\n None,\n )\n\n\nclass Test:\n def __init__(self, test_name):\n self.test_name = test_name\n self.total = 0\n self.success = 0\n self.fail_ids = []\n\n\ndef save_log(gpu_ci_mode=False):\n\n c = 1\n 
cadena = []\n subcadena = []\n countPass = 0\n countCrash = 0\n\n for x in HANDLER.log:\n if c < 17:\n subcadena.append(x.msg)\n c = c + 1\n else:\n c = 1\n cadena.append(subcadena)\n subcadena = []\n subcadena.append(x.msg)\n c = c + 1\n print()\n cadena.append(subcadena)\n\n # If it didn't run any test (probably some were skipped)\n # then return success\n if cadena == [[]]:\n return True, []\n\n df = pd.DataFrame(\n cadena,\n columns=[\n \"QueryID\",\n \"TimeStamp\",\n \"TestGroup\",\n \"InputType\",\n \"Query\",\n \"Result\",\n \"Error\",\n \"Branch\",\n \"CommitHash\",\n \"nRals\",\n \"nGPUs\",\n \"DataDirectory\",\n \"TestId\",\n \"LoadingTime\",\n \"EngineTotalTime\",\n \"TotalTime\",\n ],\n )\n\n total = df.shape[0]\n\n countPass = df[df.Result == 1].count()[\"Result\"]\n\n df1 = df[\n [\n \"QueryID\",\n \"TimeStamp\",\n \"TestGroup\",\n \"InputType\",\n \"Query\",\n \"Result\",\n \"Error\",\n \"Branch\",\n \"CommitHash\",\n \"nRals\",\n \"nGPUs\",\n \"DataDirectory\",\n \"LoadingTime\",\n \"EngineTotalTime\",\n \"TotalTime\",\n ]\n ].copy()\n\n create_summary_detail(df, gpu_ci_mode)\n\n printSummary(countPass, countCrash, total, gpu_ci_mode)\n\n if not gpu_ci_mode:\n saveLogInFile(df1)\n\n saveLog = False\n if \"saveLog\" in Settings.data[\"RunSettings\"]:\n saveLog = Settings.data[\"RunSettings\"][\"saveLog\"]\n\n print(\"saveLog = \" + str(saveLog))\n\n # TODO william kharoly felipe we should try to enable and use\n # this function in the future\n # result, error_msgs = verify_prev_google_sheet_results(df1)\n result, error_msgs = True, []\n\n if result is True and saveLog == \"true\":\n saving_google_sheet_results(df1)\n else:\n if countPass < total:\n result, error_msgs = False, []\n else:\n result, error_msgs = True, []\n\n loggingClose(name)\n return result, error_msgs\n\n\ndef create_summary_detail(df, no_color):\n pdf = df\n pdf[\"Result\"] = df[\"Result\"].replace(1, \"Success\")\n pdf[\"Result\"] = df[\"Result\"].replace(0, \"Fail\")\n\n # making 
boolean series for a team name\n filter_fail = pdf[\"Result\"] == \"Fail\"\n\n # filtering data\n pdf2 = pdf.where(filter_fail)\n pdf_fail = pdf2.dropna()\n\n if no_color:\n green = \"\"\n yellow = \"\"\n # red = \"\"\n endc = \"\"\n else:\n green = bcolors.OKGREEN\n yellow = bcolors.WARNING\n # red = bcolors.FAIL\n endc = bcolors.ENDC\n\n # display\n print(green + \"========================================================\")\n print(\"DETAILED SUMMARY TESTS\")\n print(\"========================================================\" + endc)\n pd.set_option(\"max_rows\", 1500)\n print(pdf.groupby([\"TestGroup\", \"InputType\"])[\"Result\"].value_counts())\n print(yellow + \"========================================================\")\n print(\"FAILED TESTS\" + yellow)\n print(\"========================================================\" + endc)\n # pd.set_option('max_columns', 5)\n # pd.set_option('max_colwidth', 1000)\n\n pd.set_option(\"display.max_columns\", None)\n pd.set_option(\"display.width\", 2000)\n pd.set_option(\"display.float_format\", \"{:20,.2f}\".format)\n pd.set_option(\"display.max_colwidth\", None)\n print(\n pdf_fail.groupby([\"TestGroup\", \"InputType\", \"Result\"])[\"TestId\"]\n .apply(\",\".join)\n .reset_index()\n )\n\n\n# This function use the google spreadsheet to compare the current results\n# against historic ones\n# Returns a tuple with 2 entries:\n# 1st element: False in case gpuci should be fail, True otherwise\n# 2nd element: A list of error messages (in case 1st element is False)\n# Example:\n# result, error_msgs = verify_prev_google_sheet_results(log_pdf)\n# if result == False:\n# exits the python process and do not move to next steps\n# TODO william kharoly felipe we should try to enable and use\n# this function in the future\ndef _verify_prev_google_sheet_results(log_pdf):\n import gspread\n from oauth2client.service_account import ServiceAccountCredentials\n\n def get_the_data_from_sheet():\n # Use creds to create a client to interact 
with the Google Drive API\n scope = [\n \"https://www.googleapis.com/auth/drive\",\n \"https://spreadsheets.google.com/feeds\",\n ]\n # Using credentials from BlazingSQL\n # os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']\n # # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest\n # current_dir = \"/home/ubuntu/.conda/envs/e2e\"\n\n log_info = Settings.data[\"RunSettings\"][\"logInfo\"]\n\n if log_info == \"\":\n print(\n \"\"\"####### ======= >>>>>>> WARNING this test run will not\n be compared against old results from Google Docs. Define\n the env var BLAZINGSQL_E2E_LOG_INFO\"\"\"\n )\n return None\n\n log_info = json.loads(log_info)\n creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(\n log_info, scope\n )\n client_blazing = gspread.authorize(creds_blazing)\n # Find a Locally workbook by name and open a sheet\n work_sheet = \"BSQL Log Results\"\n\n if \"worksheet\" in Settings.data[\"RunSettings\"]:\n work_sheet = Settings.data[\"RunSettings\"][\"worksheet\"]\n\n sheet_blazing = client_blazing.open(\"BSQL End-to-End Tests\").worksheet(\n work_sheet\n )\n # Writing log results into Blazing sheet\n ret = pd.DataFrame(sheet_blazing.get_all_records())\n # NOTE percy kharo william we need to patch these columns\n # before convert to parquet\n ret[\"LoadingTime\"] = ret[\"LoadingTime\"].astype(str)\n ret[\"EngineTotalTime\"] = ret[\"EngineTotalTime\"].astype(str)\n ret[\"TotalTime\"] = ret[\"TotalTime\"].astype(str)\n return ret\n\n dir_log = Settings.data[\"TestSettings\"][\"logDirectory\"]\n gspreadCacheHint = Settings.data[\"RunSettings\"][\"gspreadCacheHint\"]\n gspread_e2e_cache_path = dir_log + \"/e2e-gspread-cache.parquet\"\n\n gspread_df = None\n\n if gspreadCacheHint == \"false\":\n gspread_df = get_the_data_from_sheet()\n if gspread_df is not None:\n # Always save a cache (so when gspreadCacheHint\n # is false will refresh the cache)\n gspread_df.to_parquet(gspread_e2e_cache_path)\n elif gspreadCacheHint == \"true\":\n 
if os.path.isfile(gspread_e2e_cache_path):\n gspread_df = pd.read_parquet(gspread_e2e_cache_path)\n else:\n gspread_df = get_the_data_from_sheet()\n if gspread_df is not None:\n gspread_df.to_parquet(gspread_e2e_cache_path)\n\n if gspread_df is None:\n error_msg = \"\"\"ERROR: This test run could not be compared\n against old results from Google Docs\"\"\"\n return False, [error_msg]\n\n log_pdf_copy = log_pdf.copy()\n prev_nrals = gspread_df[\"nRALS\"][0]\n curr_nrals = Settings.data[\"RunSettings\"][\"nRals\"]\n\n # Assume prev_nrals == curr_nrals\n last_e2e_run_id = gspread_df[\"Timestamp\"][0]\n # NOTE If prev_nrals != curr_nrals we need to search the first\n # Timestamp (a.k.a ID) for the current nRals target\n if prev_nrals != curr_nrals:\n gspread_df_uniques = gspread_df.drop_duplicates()\n gspread_df_uniques_target_nrals = gspread_df_uniques.loc[\n gspread_df_uniques[\"nRALS\"] == curr_nrals\n ]\n last_e2e_run_id = gspread_df_uniques_target_nrals.iloc[\n 0, 1\n ] # select the first Timestamp from the unique values\n\n print(\n \"####### ======= >>>>>>> E2E INFO: We will compare the\"\n + \" current run against the ID (Timestamp): \"\n + last_e2e_run_id\n )\n\n last_e2e_run_df = gspread_df.loc[gspread_df[\"Timestamp\"] == last_e2e_run_id]\n\n # NOTE percy kharo william we need to rename some columns to use our dfs\n log_pdf_copy = log_pdf_copy.rename(\n columns={\n \"TestGroup\": \"Test Group\",\n \"InputType\": \"Input Type\",\n \"nRals\": \"nRALS\",\n \"DataDirectory\": \"data_dir\",\n }\n )\n\n # NOTE For debugging\n # log_pdf_copy['TimeStamp'] = log_pdf_copy['TimeStamp'].astype(str)\n # log_pdf_copy.to_parquet('/home/percy/workspace/logtest/ultimo.parquet',\n # compression='GZIP')\n # log_pdf_copy = pd.read_parquet('/home/user/last_run_log_df.parquet')\n\n error_msgs = []\n\n prev_summary = last_e2e_run_df.groupby(\"Test Group\").count()\n curr_summary = log_pdf_copy.groupby(\"Test Group\").count()\n\n prev_test_groups = prev_summary.index.tolist()\n 
curr_test_groups = curr_summary.index.tolist()\n\n has_less_test_groups = len(prev_test_groups) > len(curr_test_groups)\n\n # Check if someone deleted some tests\n # (there more test groups in the sheet)\n if has_less_test_groups:\n list_difference = [\n item for item in prev_test_groups if item not in curr_test_groups\n ]\n error_msg = (\n \"ERROR: current e2e has less test groups than\"\n + \" previous run, delta is %s\" % list_difference\n )\n error_msgs.append(error_msg)\n\n # Just check the common test groups\n if has_less_test_groups:\n test_groups = curr_test_groups\n else:\n test_groups = prev_test_groups\n\n for test_group in test_groups:\n prev_test_group_df = last_e2e_run_df.loc[\n last_e2e_run_df[\"Test Group\"] == test_group\n ]\n prev_input_types = (\n prev_test_group_df.groupby(\"Input Type\").count().index.tolist()\n )\n\n curr_test_group_df = log_pdf_copy.loc[log_pdf_copy[\"Test Group\"] == test_group]\n cur_input_typ = curr_test_group_df.groupby(\"Input Type\").count().index.tolist()\n\n has_less_input_types = len(prev_input_types) > len(cur_input_typ)\n\n if has_less_input_types is True:\n list_difference = [\n item for item in prev_input_types if item not in cur_input_typ\n ]\n error_msg = \"\"\"ERROR: current test group %s has less\n input types cases, delta is %s\"\"\" % (\n test_group,\n list_difference,\n )\n error_msgs.append(error_msg)\n\n for input_type in prev_input_types:\n prev_tests_df = prev_test_group_df.loc[\n prev_test_group_df[\"Input Type\"] == input_type\n ]\n prev_tests_df.sort_values(by=[\"QueryID\"])\n\n curr_tests_df = curr_test_group_df.loc[\n curr_test_group_df[\"Input Type\"] == input_type\n ]\n curr_tests_df.sort_values(by=[\"QueryID\"])\n\n # We need to make a copy since we are going to drop some row\n prev_tests_df = prev_tests_df.copy()\n curr_tests_df = curr_tests_df.copy()\n\n # NOTE for debugging\n # print(\"============================================PREV!\")\n # print(prev_tests_df.head())\n # 
print(len(prev_tests_df))\n # print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxCURR!\")\n # print(curr_tests_df.head())\n # print(len(curr_tests_df))\n\n # Check if current run has less tests than previous run\n len_prev_tests_df = len(prev_tests_df)\n len_curr_tests_df = len(curr_tests_df)\n has_less_tests = len_prev_tests_df > len_curr_tests_df\n\n # NOTE for debugging\n # print(\"====== PREV TESTS ======\")\n # print(prev_tests_df)\n # print(\"====== CURR TESTS ======\")\n # print(curr_tests_df)\n\n if has_less_tests:\n prev_tests = prev_tests_df[\"QueryID\"].tolist()\n curr_tests = curr_tests_df[\"QueryID\"].tolist()\n list_difference = [\n item for item in prev_tests if item not in curr_tests\n ]\n error_msg = \"\"\"ERROR: The test group %s has less tests than\n previous run for input type %s, delta is %s\"\"\" % (\n test_group,\n input_type,\n list_difference,\n )\n error_msgs.append(error_msg)\n\n n = len_prev_tests_df - len_curr_tests_df\n prev_tests_df.drop(prev_tests_df.tail(n).index, inplace=True)\n elif len_prev_tests_df < len_curr_tests_df:\n n = len_curr_tests_df - len_prev_tests_df\n curr_tests_df.drop(curr_tests_df.tail(n).index, inplace=True)\n\n prev_tests_results = prev_tests_df[\"Result\"].to_list()\n curr_tests_results = curr_tests_df[\"Result\"].to_list()\n\n for i in range(0, len(prev_tests_results)):\n prev_test_result = prev_tests_results[i]\n curr_test_result = curr_tests_results[i]\n\n if prev_test_result == 1 and curr_test_result == 0:\n error_msg = \"\"\"ERROR: Test %d for %s (%s) is now failing\n but before was ok!\"\"\" % (\n i + 1,\n test_group,\n input_type,\n )\n error_msgs.append(error_msg)\n\n succs = len(error_msgs) == 0\n return succs, error_msgs\n\n\ndef saving_google_sheet_results(log_pdf):\n import gspread\n from oauth2client.service_account import ServiceAccountCredentials\n\n log_info = Settings.data[\"RunSettings\"][\"logInfo\"]\n\n if log_info == \"\":\n print(\n \"\"\"####### ======= >>>>>>> WARNING this test run 
will\n not save its results into the Google spreadsheet.\"\"\"\n )\n return\n\n # Create an empty list\n log_list = []\n\n # Iterate over each row\n for index, rows in log_pdf.iterrows():\n # Create a list for the current row (ADDS)\n current_list = [\n rows.QueryID,\n str(rows.TimeStamp),\n str(rows.TestGroup),\n rows.InputType,\n rows.Query,\n rows.Result,\n rows.Error,\n rows.Branch,\n str(rows.CommitHash),\n rows.nRals,\n rows.nGPUs,\n rows.DataDirectory,\n rows.LoadingTime,\n rows.EngineTotalTime,\n rows.TotalTime,\n ]\n\n # append the list to the final list\n log_list.append(current_list)\n # Use creds to create a client to interact with the Google Drive API\n scope = [\n \"https://www.googleapis.com/auth/drive\",\n \"https://spreadsheets.google.com/feeds\",\n ]\n # === 1. BlazingSQL =====\n # Using credentials from BlazingSQL\n # os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']\n # # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest\n current_dir = \"/home/ubuntu/.conda/envs/e2e\"\n print(current_dir)\n\n log_info = json.loads(log_info)\n creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(log_info, scope)\n client_blazing = gspread.authorize(creds_blazing)\n # Find a Locally workbook by name and open a sheet\n work_sheet = \"BSQL Log Results\"\n if \"worksheet\" in Settings.data[\"RunSettings\"]:\n work_sheet = Settings.data[\"RunSettings\"][\"worksheet\"]\n blaz_googlesheat = client_blazing.open(\"BSQL End-to-End Tests\")\n sheet_blazing = blaz_googlesheat.worksheet(work_sheet)\n # Writing log results into Blazing sheet\n total_queries = len(log_list)\n for i in range(0, total_queries):\n sheet_blazing.append_row(log_list[i])\n time.sleep(1)\n\n print(\"\\nTable was uptdated into Blazing Google SpreadSheet\")\n\n\ndef saveLogInFile(df):\n dir_log = Settings.data[\"TestSettings\"][\"logDirectory\"]\n filepath = getFileName(dir_log)\n df.to_excel(filepath, index=False)\n\ndef validate_messages(error_message, 
message_validation):\n error_message = error_message.replace('\\n', ' ').replace('\\r', ' ')\n message_validation = message_validation.replace('\\n', ' ').replace('\\r', ' ')\n error_message = error_message.replace(' ', '')\n message_validation = message_validation.replace(' ', '')\n\n if error_message == message_validation:\n result = \"Success\"\n else:\n result = \"Fail\"\n\n return result\n\nclass bcolors:\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n\n\ndef on_jenkins():\n # NOTE For more env vars see\n # https://wiki.jenkins.io/display/JENKINS/Building+a+software+project\n jenkins_job = os.environ.get(\"JOB_NAME\")\n if jenkins_job is not None:\n return True\n\n return False\n\n\ndef print_tests(tests, onlyFails=False):\n print(\n \"\"\"************************************************************\n *******************\"\"\"\n )\n\n tab = \" \"\n\n failedPrefix = \"\"\n if onlyFails:\n failedPrefix = \"FAILED\"\n\n # TODO percy check None\n for extension in tests:\n if onlyFails:\n if extension == \"parquet\":\n print(\n \"!!!!!!!!!!!!!!!! \"\n + failedPrefix\n + \" \"\n + extension\n + \" TESTS !!!!!!!!!!!!\"\n )\n else:\n print(\n \"!!!!!!!!!!!!!!!! 
\"\n + failedPrefix\n + \" \"\n + extension\n + \" TESTS !!!!!!!!!!!!!!!!\"\n )\n else:\n if extension == \"parquet\":\n print(\"################ \" + extension + \" TESTS ############\")\n else:\n print(\"############## \" + extension + \" TESTS ##############\")\n\n testNames = tests.get(extension)\n for testName in testNames:\n test = testNames.get(testName)\n\n total = test.get(\"total\")\n countPass = test.get(\"countPass\")\n countCrash = test.get(\"countCrash\")\n failIds = test.get(\"failIds\")\n\n showTest = False\n\n if onlyFails:\n if len(failIds) > 0:\n showTest = True\n print(tab + \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n else:\n showTest = True\n print(tab + \"++++++++++++++++++++++++++++++++\")\n\n if showTest:\n green = bcolors.OKGREEN\n yellow = bcolors.WARNING\n red = bcolors.FAIL\n endc = bcolors.ENDC\n\n # don't use colors since jenkins doesn't support ansi chars\n if on_jenkins():\n green = \"\"\n yellow = \"\"\n red = \"\"\n endc = \"\"\n\n print(\n tab\n + \"SUMMARY for \"\n + failedPrefix\n + \" test suite: \"\n + testName\n + \" - \"\n + extension\n )\n\n if not onlyFails:\n pass_green = green\n pass_endc = endc\n if (\n countPass != total\n ): # if no full pass then don't use green colors here\n pass_green = \"\"\n pass_endc = \"\"\n\n print(\n pass_green\n + tab\n + \"PASSED: \"\n + str(countPass)\n + \"/\"\n + str(total)\n + pass_endc\n )\n\n fails = total - countPass - countCrash\n yellow_fail = yellow\n yellow_endc = endc\n if fails == 0:\n yellow_fail = \"\"\n yellow_endc = \"\"\n\n print(\n yellow_fail\n + tab\n + \"FAILED: \"\n + str(fails)\n + \"/\"\n + str(total)\n + \" \"\n + str(failIds)\n + yellow_endc\n )\n\n red_crash = red\n red_endc = endc\n\n # if no crashes then don't use red colors here\n if countCrash == 0:\n red_crash = \"\"\n red_endc = \"\"\n\n print(\n red_crash\n + tab\n + \"CRASH: \"\n + str(countCrash)\n + \"/\"\n + str(total)\n + red_endc\n )\n\n if not onlyFails:\n print(tab + \"TOTAL: \" + str(total))\n\n\ndef 
printSummary(countPass, countCrash, total, no_color):\n\n if no_color:\n green = \"\"\n yellow = \"\"\n red = \"\"\n endc = \"\"\n else:\n green = bcolors.OKGREEN\n yellow = bcolors.WARNING\n red = bcolors.FAIL\n endc = bcolors.ENDC\n\n # Second: print the global summary (totals from all the tests)\n fails = total - countPass - countCrash\n print(\n \"\"\"**********************************************************\n *********************\"\"\"\n )\n print(\"TOTAL SUMMARY for test suite: \")\n print(green + \"PASSED: \" + str(countPass) + \"/\" + str(total) + endc)\n print(yellow + \"FAILED: \" + str(fails) + \"/\" + str(total) + endc)\n print(red + \"CRASH: \" + str(countCrash) + \"/\" + str(total) + endc)\n print(\"TOTAL: \" + str(total))\n\n\ndef getFileName(dir_log):\n\n fecha = time.strftime(\"%H%M%S\")\n hora = time.strftime(\"%I%M%S\")\n return dir_log + \"LogTest\" + fecha + hora + \".xlsx\" #\n\n\n# ===========================================================================\n\ntableNames = [\n \"customer\",\n \"orders\",\n \"supplier\",\n \"lineitem\",\n \"part\",\n \"partsupp\",\n \"nation\",\n \"region\",\n \"perf\",\n \"acq\",\n \"names\",\n \"bool_orders\",\n \"web_site\",\n \"web_sales\",\n \"web_returns\",\n \"web_page\",\n \"web_clickstreams\",\n \"warehouse\",\n \"time_dim\",\n \"store_sales\",\n \"store_returns\",\n \"store\",\n \"ship_mode\",\n \"reason\",\n \"promotion\",\n \"product_reviews\",\n \"item_marketprices\",\n \"item\",\n \"inventory\",\n \"income_band\",\n \"household_demographics\",\n \"date_dim\",\n \"customer_demographics\",\n \"customer_address\",\n \"customer\",\n \"split\",\n \"docked\",\n \"smiles\",\n \"dcoids\",\n]\n\n\ndef get_table_occurrences(query):\n res = []\n for name in tableNames:\n if query.find(name) != -1:\n res.append(name)\n return res\n\n\ndef replace_all(text, dic):\n for i, j in dic.items():\n text = re.sub(r\"\\s%s(\\s|$|\\,)\" % i, j, text)\n return text\n\n\ndef get_blazingsql_query(db_name, query):\n 
new_query = query\n for table_name in get_table_occurrences(query):\n new_query = replace_all(\n new_query,\n {table_name: \" %(table)s \" % {\"table\": db_name + \".\" + table_name}},\n )\n return new_query\n\n\ndef get_drill_query(query):\n new_query = query\n for table_name in get_table_occurrences(query):\n new_query = replace_all(\n new_query, {table_name: \" dfs.tmp.`%(table)s` \" % {\"table\": table_name}}\n )\n return new_query\n\n\n# ================================================================================================================\n\n\ndef run_query_drill(drill, query_str):\n timeout = 400\n query_result = drill.query(query_str, timeout)\n df = query_result.to_dataframe()\n if df.size == 0:\n return Result(query_result.columns, df, None)\n df = df[query_result.columns]\n result = Result(query_result.columns, df, None)\n return result\n\n\ndef run_query_spark(spark, query_str):\n query_result = spark.sql(query_str)\n df = query_result.toPandas()\n if df.size == 0:\n return Result(query_result.columns, df, None)\n df = df[query_result.columns]\n result = Result(query_result.columns, df, None)\n return result\n\n\ndef save_results_arrow(filename, pdf2):\n # save results\n import pyarrow as pa\n\n table = pa.Table.from_pandas(pdf2)\n # schema = pa.Schema.from_pandas(pdf2)\n with open(filename, \"bw\") as f:\n writer = pa.RecordBatchFileWriter(f, table.schema)\n writer.write(table)\n writer.close()\n\n\ndef save_results_parquet(filename, pdf2):\n pdf2.to_parquet(filename, compression=\"GZIP\")\n\n\ndef run_query(\n bc,\n engine,\n query,\n queryId,\n queryType,\n worder,\n orderBy,\n acceptable_difference,\n use_percentage,\n input_type,\n **kwargs\n):\n print(query)\n\n query_spark = kwargs.get(\"query_spark\", query)\n\n algebra = kwargs.get(\"algebra\", \"\")\n\n nRals = Settings.data[\"RunSettings\"][\"nRals\"]\n\n print_result = kwargs.get(\"print_result\")\n if print_result is None:\n print_result = False\n\n message_validation = 
kwargs.get(\"message_validation\", \"\")\n if message_validation is None:\n message_validation = False\n\n data_type = cs.get_extension(input_type)\n\n if Settings.execution_mode != \"Generator\":\n print(\n \"\\n=============== New query: \"\n + str(queryId)\n + \" - \"\n + data_type\n + \" =================\"\n )\n\n load_time = 0\n engine_time = 0\n total_time = 0\n\n nested_query = kwargs.get(\"nested_query\", False)\n \n error_message = \"\"\n\n if not nested_query:\n # if int(nRals) == 1: # Single Node\n query_blz = query # get_blazingsql_query('main', query)\n if algebra == \"\":\n start_time = time.time()\n try:\n result_gdf = bc.sql(query_blz)\n except Exception as e:\n error_message=str(e)\n\n if not message_validation:\n end_time = time.time()\n total_time = (end_time - start_time) * 1000\n # SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN\n # duration ELSE 0 END) AS load_time,\n # MAX(load_time) AS load_time,\n # log_result = bc.log(\n # \"\"\"SELECT\n # MAX(end_time) as end_time, query_id,\n # MAX(total_time) AS total_time\n # FROM (\n # SELECT\n # query_id, node_id,\n # SUM(CASE WHEN info = 'Query Execution Done' THEN\n # duration ELSE 0 END) AS total_time,\n # MAX(log_time) AS end_time\n # FROM\n # bsql_logs\n # WHERE\n # info = 'evaluate_split_query load_data'\n # OR info = 'Query Execution Done'\n # GROUP BY\n # node_id, query_id\n # )\n # GROUP BY\n # query_id\n # ORDER BY\n # end_time DESC limit 1\"\"\"\n # )\n\n # if int(nRals) == 1: # Single Node\n # n_log = log_result\n # else: # Simple Distribution\n # n_log = log_result.compute()\n\n load_time = 0 # n_log['load_time'][0]\n engine_time = 0 #n_log[\"total_time\"][0]\n else:\n result_gdf = bc.sql(query_blz, algebra=algebra)\n\n else: # for nested queries as column basis test\n result_gdf = kwargs.get(\"blz_result\", [])\n\n str_code_test = str(get_codTest(queryType)).upper()\n filename = str_code_test + \"-\" + str(queryId) + \".parquet\"\n\n result_dir = 
Settings.data[\"TestSettings\"][\"fileResultsDirectory\"]\n file_results_dir = str(result_dir)\n\n\n if not message_validation== \"\":\n print_query_results2(\n query,\n queryId,\n input_type,\n queryType,\n error_message,\n message_validation\n )\n elif not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n # Drill\n query_drill = get_drill_query(query)\n result_drill_gd = run_query_drill(engine, query_drill)\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n .to_pandas()\n )\n pdf2 = to_pandas_f64_engine(\n result_drill_gd.resultSet, expected_dtypes\n )\n pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))\n formatResults(pdf1, pdf2, worder, orderBy)\n\n if Settings.execution_mode == ExecutionMode.GENERATOR:\n file_res_drill_dir = (\n file_results_dir + \"/\" + \"drill\" + \"/\" + filename\n )\n\n if not os.path.exists(file_res_drill_dir):\n save_results_parquet(file_res_drill_dir, pdf2)\n\n print(\"Drill: \" + filename + \" generated.\")\n\n else:\n print_query_results(\n query,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n\n else:\n print_query_results2(\n query, queryId, queryType, result_gdf.error_message\n )\n elif isinstance(engine, SparkSession):\n # Spark\n result_spark_df = run_query_spark(engine, query_spark)\n\n if result_gdf is not None:\n if result_gdf.columns is not None:\n\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n 
.to_pandas()\n )\n pdf2 = to_pandas_f64_engine(\n result_spark_df.resultSet, expected_dtypes\n )\n pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))\n formatResults(pdf1, pdf2, worder, orderBy)\n\n if Settings.execution_mode == ExecutionMode.GENERATOR:\n\n file_res_drill_dir = (\n file_results_dir + \"/\" + \"spark\" + \"/\" + filename\n )\n\n if not os.path.exists(file_res_drill_dir):\n save_results_parquet(file_res_drill_dir, pdf2)\n print(\"Spark: \" + filename + \" generated.\")\n\n else:\n print_query_results(\n query_spark,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n else:\n print_query_results2(\n query_spark, queryId, queryType, result_gdf.error_message\n )\n else: # GPUCI\n\n compareResults = True\n if \"compare_results\" in Settings.data[\"RunSettings\"]:\n compareResults = Settings.data[\"RunSettings\"][\"compare_results\"]\n\n if compareResults == \"true\":\n resultFile = file_results_dir + \"/\" + str(engine) + \"/\" + filename\n pdf2 = get_results(resultFile)\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n .to_pandas()\n )\n format_pdf(pdf1, worder, orderBy)\n print(pdf2)\n\n print_query_results(\n query,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n\n else:\n print_query_results2(\n query, queryId, queryType, result_gdf.error_message\n )\n else:\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is 
dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n .to_pandas()\n )\n pdf2 = pd.DataFrame()\n formatResults(pdf1, pdf2, worder, orderBy)\n\n print_query_results(\n query,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n else:\n print_query_results2(\n query, queryId, queryType, result_gdf.error_message\n )\n\ndef run_query_log(\n bc,\n query,\n queryId,\n queryType,\n **kwargs\n):\n result_gdf = None\n error_message = \"\"\n message_validation = \"\"\n\n try:\n result_gdf = bc.log(query)\n except Exception as e:\n error_message=str(e)\n\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n print_query_results2(\n query, queryId, DataType.CUDF, queryType, error_message, message_validation\n )\n else:\n print_query_results2(\n query, queryId, DataType.CUDF, queryType, error_message, message_validation\n )\n\ndef run_query_performance(\n bc,\n drill,\n query,\n queryId,\n queryType,\n worder,\n orderBy,\n acceptable_difference,\n use_percentage,\n **kwargs\n):\n # Blazing\n query_blz = query # get_blazingsql_query('main', query)\n result_gdf = bc.sql(query_blz).get()\n if result_gdf.error_message == \"\":\n print_query_results_performance(query, queryId, queryType, result_gdf)\n else:\n print_query_results2(query, queryId, queryType, result_gdf.error_message)\n\n\ndef formatResults(pdf1, pdf2, worder, orderBy):\n if worder == 1 and pdf1.size != 0 and pdf2.size != 0:\n if len(pdf1.columns) == len(pdf2.columns):\n pdf1.sort_values(\n [orderBy] if orderBy else pdf1.columns.to_list(), inplace=True\n )\n pdf2.sort_values(\n [orderBy] if orderBy else 
pdf2.columns.to_list(), inplace=True\n )\n\n\ndef format_pdf(pdf, worder, orderBy):\n if worder == 1 and pdf.size != 0:\n pdf.sort_values([orderBy] if orderBy else pdf.columns.to_list(), inplace=True)\n\n\ndef get_results(result_file):\n df = pd.read_parquet(result_file)\n\n return df\n" ]
[ [ "numpy.allclose", "pandas.read_parquet", "pandas.to_numeric", "numpy.dtype", "numpy.issubdtype", "pandas.DataFrame", "pandas.set_option", "numpy.all", "numpy.datetime64", "numpy.warnings.filterwarnings" ] ]
andyj10224/psi4numpy
[ "cbef6ddcb32ccfbf773befea6dc4aaae2b428776" ]
[ "Moller-Plesset/MP3.py" ]
[ "\"\"\"\nReference implementation for the correlation energy of MP3 with an RHF reference.\n\nReferences:\n- Equations from [Szabo:1996]\n\"\"\"\n\n__authors__ = \"Daniel G. A. Smith\"\n__credits__ = [\"Daniel G. A. Smith\", \"Dominic A. Sirianni\"]\n\n__copyright__ = \"(c) 2014-2018, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-05-23\"\n\nimport time\nimport numpy as np\nnp.set_printoptions(precision=5, linewidth=200, suppress=True)\nimport psi4\n\n# Memory for Psi4 in GB\npsi4.set_memory('2 GB')\npsi4.core.set_output_file('output.dat', False)\n\n# Memory for numpy in GB\nnumpy_memory = 2\n\nmol = psi4.geometry(\"\"\"\nO\nH 1 1.1\nH 1 1.1 2 104\nsymmetry c1\n\"\"\")\n\npsi4.set_options({'basis': 'aug-cc-pvdz',\n 'scf_type': 'pk',\n 'guess': 'core',\n 'mp2_type': 'conv',\n 'mp_type': 'conv',\n 'freeze_core': 'false',\n 'e_convergence': 1e-8,\n 'd_convergence': 1e-8})\n\n# First compute RHF energy using Psi4\nscf_e, wfn = psi4.energy('SCF', return_wfn=True)\n\n# Coefficient Matrix\nC = np.array(wfn.Ca())\n# Double occupied orbitals\nndocc = wfn.doccpi()[0]\n# Number of molecular orbitals\nnmo = wfn.nmo()\n# SCF energy\nSCF_E = wfn.energy()\n# Orbital energies\neps = wfn.epsilon_a()\neps = np.array([eps.get(x) for x in range(C.shape[0])])\n\n# Compute size of ERI tensor in GB\nERI_Size = (nmo**4)*8.0 / 1E9\nprint(\"Size of the ERI tensor will be %4.2f GB.\" % ERI_Size)\nmemory_footprint = ERI_Size*2.5\nif memory_footprint > numpy_memory:\n clean()\n raise Exception(\"Estimated memory utilization (%4.2f GB) exceeds numpy_memory limit of %4.2f GB.\" % (memory_footprint, numpy_memory))\n\n# Integral generation from Psi4's MintsHelper\nt = time.time()\nmints = psi4.core.MintsHelper(wfn.basisset())\nI = np.array(mints.ao_eri())\nI = I.reshape(nmo, nmo, nmo, nmo)\n\nprint('\\nTotal time taken for ERI integrals: %.3f seconds.' 
% (time.time()-t))\n\nt=time.time()\n\n# Complete the AOpqrs -> MOiajb step\nMO = np.einsum('rJ,pqrs->pqJs', C, I)\nMO = np.einsum('pI,pqJs->IqJs', C, MO)\nMO = np.einsum('sB,IqJs->IqJB', C, MO)\nMO = np.einsum('qA,IqJB->IAJB', C, MO)\n\n# (pq|rs) -> <ps|rq>\nMO = MO.swapaxes(1, 2)\n\nprint('\\nTotal time taken for integral transformation: %.f seconds' % (time.time()-t))\nprint('Shape of MO integrals %s \\n' % str(MO.shape))\n\n# Build epsilon tensor\neocc = eps[:ndocc]\nevirt = eps[ndocc:]\nepsilon = 1/(eocc.reshape(-1, 1, 1, 1) + eocc.reshape(-1, 1, 1) - evirt.reshape(-1, 1) - evirt)\n\n# Build o and v slices\no = slice(0, ndocc)\nv = slice(ndocc, MO.shape[0])\n\n### MP2 correlation energy\n\nMP2corr_E = 2 * np.einsum('abrs,rsab,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)\nMP2corr_E -= np.einsum('abrs,rsba,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)\nMP2total_E = SCF_E + MP2corr_E\nprint('MP2 correlation energy: %16.8f' % MP2corr_E)\nprint('MP2 total energy: %16.8f' % MP2total_E)\npsi4.compare_values(psi4.energy('MP2'), MP2total_E, 6, 'MP2 Energy')\n\nprint('\\n Starting MP3 energy...')\nt = time.time()\n\n# MP3 Correlation energy\n\n# Prefactors taken from terms in unnumbered expression for spatial-orbital MP3\n# energy on [Szabo:1996] pp. (bottom) 367 - (top) 368. Individual equations taken\n# from [Szabo:1996] Tbl. 6.2 pp. 
364-365\n\n# Equation 1: 3rd order diagram 1\nMP3corr_E = 2.0 * np.einsum('abru,ruts,tsab,abru,abts', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon) \n# Equation 2: 3rd order diagram 2 \nMP3corr_E += 2.0 * np.einsum('adrs,cbad,rscb,adrs,cbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 3: 3rd order diagram 3\nMP3corr_E += -4.0 * np.einsum('acrt,rbsc,stab,acrt,abst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 4: 3rd order diagram 4\nMP3corr_E += -4.0 * np.einsum('bcrt,rasb,stac,bcrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 5: 3rd order diagram 5\nMP3corr_E += 8.0 * np.einsum('acrt,btsc,rsab,acrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 6: 3rd order diagram 6\nMP3corr_E += 2.0 * np.einsum('cbrt,atsc,rsab,cbrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 7: 3rd order diagram 7\nMP3corr_E += -1.0 * np.einsum('acrs,dbac,srdb,acrs,dbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 8: 3rd order diagram 8\nMP3corr_E += -1.0 * np.einsum('abrt,trus,usab,abtr,abus', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)\n# Equation 9: 3rd order diagram 9\nMP3corr_E += 2.0 * np.einsum('bcrt,arbs,tsac,cbrt,acst', MO[o, o, v, v], MO[o, v, o, v], MO[v, v, o, o], epsilon, epsilon)\n# Equation 10: 3rd order diagram 10\nMP3corr_E += 2.0 * np.einsum('cbrt,rasb,stac,cbrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 11: 3rd order diagram 11\nMP3corr_E += -4.0 * np.einsum('abrs,scat,rtbc,abrs,cbrt', MO[o, o, v, v], MO[v, o, o, v], MO[v, v, o, o], epsilon, epsilon)\n# Equation 12: 3rd order diagram 12\nMP3corr_E += -4.0 * np.einsum('bcrt,atsc,rsab,bctr,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)\n\nprint('...took %.3f seconds to compute MP3 correlation 
energy.\\n' % (time.time()-t))\n\nprint('Third order energy: %16.8f' % MP3corr_E)\nMP3corr_E += MP2corr_E\nMP3total_E = SCF_E + MP3corr_E\nprint('MP3 correlation energy: %16.8f' % MP3corr_E)\nprint('MP3 total energy: %16.8f' % MP3total_E)\npsi4.compare_values(psi4.energy('MP3'), MP3total_E, 6, 'MP3 Energy')\n\n\n" ]
[ [ "numpy.einsum", "numpy.set_printoptions" ] ]
KoconJan/BERT-NER-CLI
[ "6f1323bf6294bc05ee3ee9a58e5b932a68bb85c0" ]
[ "bert_ner.py" ]
[ "#! usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n\"\"\"\r\nCopyright 2018 The Google AI Language Team Authors.\r\nBASED ON Google_BERT.\r\n@Author:zhoukaiyin\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport os\r\nfrom bert import modeling\r\nfrom bert import optimization\r\nfrom bert import tokenization\r\nimport tensorflow as tf\r\nfrom sklearn.metrics import f1_score,precision_score,recall_score\r\nfrom tensorflow.python.ops import math_ops\r\nimport tf_metrics\r\n\r\nflags = tf.flags\r\n\r\nFLAGS = flags.FLAGS\r\n\r\nflags.DEFINE_string(\r\n \"data_dir\", './drive/My Drive/ai/NERdata',\r\n \"The input datadir.\",\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"bert_config_file\", './drive/My Drive/ai/checkpoint/bert_config.json',\r\n \"The config json file corresponding to the pre-trained BERT model.\"\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"task_name\", 'NER', \"The name of the task to train.\"\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"output_dir\", './drive/My Drive/ai/output/result_dir/',\r\n \"The output directory where the model checkpoints will be written.\"\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"tpu_name\", 'gcp_tpu',\r\n \"Use Google Cloud Colaborator TPU to train\"\r\n)\r\n\r\n## Other parameters\r\nflags.DEFINE_string(\r\n \"init_checkpoint\", './drive/My Drive/ai/checkpoint/bert_model.ckpt',\r\n \"Initial checkpoint (usually from a pre-trained BERT model).\"\r\n)\r\n\r\nflags.DEFINE_bool(\r\n \"do_lower_case\", True,\r\n \"Whether to lower case the input text.\"\r\n)\r\n\r\nflags.DEFINE_integer(\r\n \"max_seq_length\", 128,\r\n \"The maximum total input sequence length after WordPiece tokenization.\"\r\n)\r\n\r\nflags.DEFINE_bool(\r\n \"do_train\", True,\r\n \"Whether to run training.\"\r\n)\r\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\r\n\r\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev 
set.\")\r\n\r\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\r\n\r\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\r\n\r\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\r\n\r\nflags.DEFINE_float(\"num_train_epochs\", 3.0, \"Total number of training epochs to perform.\")\r\n\r\nflags.DEFINE_float(\r\n \"warmup_proportion\", 0.1,\r\n \"Proportion of training to perform linear learning rate warmup for. \"\r\n \"E.g., 0.1 = 10% of training.\")\r\n\r\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\r\n \"How often to save the model checkpoint.\")\r\n\r\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\r\n \"How many steps to make in each estimator call.\")\r\n\r\nflags.DEFINE_string(\"vocab_file\", './drive/My Drive/ai/checkpoint/vocab.txt',\r\n \"The vocabulary file that the BERT model was trained on.\")\r\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\r\nflags.DEFINE_integer(\r\n \"num_tpu_cores\", 8,\r\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\r\n\r\nclass InputExample(object):\r\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\r\n\r\n def __init__(self, guid, text, label=None):\r\n \"\"\"Constructs a InputExample.\r\n\r\n Args:\r\n guid: Unique id for the example.\r\n text_a: string. The untokenized text of the first sequence. For single\r\n sequence tasks, only this sequence must be specified.\r\n label: (Optional) string. The label of the example. 
This should be\r\n specified for train and dev examples, but not for test examples.\r\n \"\"\"\r\n self.guid = guid\r\n self.text = text\r\n self.label = label\r\n\r\n\r\nclass InputFeatures(object):\r\n \"\"\"A single set of features of data.\"\"\"\r\n\r\n def __init__(self, input_ids, input_mask, segment_ids, label_ids):\r\n self.input_ids = input_ids\r\n self.input_mask = input_mask\r\n self.segment_ids = segment_ids\r\n self.label_ids = label_ids\r\n\r\n\r\nclass DataProcessor(object):\r\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_labels(self):\r\n \"\"\"Gets the list of labels for this data set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n @classmethod\r\n def _read_data(cls, input_file):\r\n \"\"\"Reads a BIO data.\"\"\"\r\n with open(input_file) as f:\r\n lines = []\r\n words = []\r\n labels = []\r\n for line in f:\r\n contends = line.strip()\r\n word = line.strip().split(' ')[0]\r\n label = line.strip().split(' ')[-1]\r\n if contends.startswith(\"-DOCSTART-\"):\r\n words.append('')\r\n continue\r\n if len(contends) == 0 and words[-1] == '.':\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])\r\n lines.append([l, w])\r\n words = []\r\n labels = []\r\n continue\r\n words.append(word)\r\n labels.append(label)\r\n return lines\r\n\r\n\r\nclass NerProcessor(DataProcessor):\r\n def get_train_examples(self, data_dir):\r\n return self._create_example(\r\n self._read_data(os.path.join(data_dir, \"train.txt\")), \"train\"\r\n )\r\n\r\n def get_dev_examples(self, data_dir):\r\n return self._create_example(\r\n 
self._read_data(os.path.join(data_dir, \"dev.txt\")), \"dev\"\r\n )\r\n\r\n def get_labels(self):\r\n return [\"B-MISC\", \"I-MISC\", \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", \"X\"]\r\n\r\n def _create_example(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n guid = \"%s-%s\" % (set_type, i)\r\n text = tokenization.convert_to_unicode(line[1])\r\n label = tokenization.convert_to_unicode(line[0])\r\n examples.append(InputExample(guid=guid, text=text, label=label))\r\n return examples\r\n\r\n\r\ndef convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list, 1):\r\n label_map[label] = i\r\n textlist = example.text.split(' ')\r\n labellist = example.label.split(' ')\r\n tokens = []\r\n labels = []\r\n for i, word in enumerate(textlist):\r\n token = tokenizer.tokenize(word)\r\n tokens.extend(token)\r\n label_1 = labellist[i]\r\n for m in range(len(token)):\r\n if m == 0:\r\n labels.append(label_1)\r\n else:\r\n labels.append(\"X\")\r\n # tokens = tokenizer.tokenize(example.text)\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels = labels[0:(max_seq_length - 2)]\r\n ntokens = []\r\n segment_ids = []\r\n label_ids = []\r\n ntokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n label_ids.append(0)\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n segment_ids.append(0)\r\n label_ids.append(label_map[labels[i]])\r\n ntokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n label_ids.append(0)\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n label_ids.append(0)\r\n # print(len(input_ids))\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert 
len(segment_ids) == max_seq_length\r\n assert len(label_ids) == max_seq_length\r\n\r\n if ex_index < 5:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join(\r\n [tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label_ids: %s\" % \" \".join([str(x) for x in label_ids]))\r\n feature = InputFeatures(\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_ids=label_ids\r\n )\r\n return feature\r\n\r\n\r\ndef filed_based_convert_examples_to_features(\r\n examples, label_list, max_seq_length, tokenizer, output_file\r\n):\r\n writer = tf.python_io.TFRecordWriter(output_file)\r\n for (ex_index, example) in enumerate(examples):\r\n if ex_index % 5000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)\r\n\r\n def create_int_feature(values):\r\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\r\n return f\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\r\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\r\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\r\n features[\"label_ids\"] = create_int_feature(feature.label_ids)\r\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\r\n writer.write(tf_example.SerializeToString())\r\n\r\n\r\ndef file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):\r\n name_to_features = {\r\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\r\n 
\"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\r\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\r\n \"label_ids\": tf.FixedLenFeature([seq_length], tf.int64),\r\n }\r\n\r\n def _decode_record(record, name_to_features):\r\n example = tf.parse_single_example(record, name_to_features)\r\n for name in list(example.keys()):\r\n t = example[name]\r\n if t.dtype == tf.int64:\r\n t = tf.to_int32(t)\r\n example[name] = t\r\n return example\r\n\r\n def input_fn(params):\r\n batch_size = params[\"batch_size\"]\r\n d = tf.data.TFRecordDataset(input_file)\r\n if is_training:\r\n d = d.repeat()\r\n d = d.shuffle(buffer_size=100)\r\n d = d.apply(tf.contrib.data.map_and_batch(\r\n lambda record: _decode_record(record, name_to_features),\r\n batch_size=batch_size,\r\n drop_remainder=drop_remainder\r\n ))\r\n return d\r\n return input_fn\r\n\r\n\r\ndef create_model(bert_config, is_training, input_ids, input_mask,\r\n segment_ids, labels, num_labels, use_one_hot_embeddings):\r\n model = modeling.BertModel(\r\n config=bert_config,\r\n is_training=is_training,\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n token_type_ids=segment_ids,\r\n use_one_hot_embeddings=use_one_hot_embeddings\r\n )\r\n\r\n output_layer = model.get_sequence_output()\r\n\r\n hidden_size = output_layer.shape[-1].value\r\n\r\n output_weight = tf.get_variable(\r\n \"output_weights\", [num_labels, hidden_size],\r\n initializer=tf.truncated_normal_initializer(stddev=0.02)\r\n )\r\n output_bias = tf.get_variable(\r\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer()\r\n )\r\n with tf.variable_scope(\"loss\"):\r\n if is_training:\r\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\r\n output_layer = tf.reshape(output_layer, [-1, hidden_size])\r\n logits = tf.matmul(output_layer, output_weight, transpose_b=True)\r\n logits = tf.nn.bias_add(logits, output_bias)\r\n logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11])\r\n log_probs = 
tf.nn.log_softmax(logits, axis=-1)\r\n\r\n # labels = tf.cast(labels,dtype=tf.float32)\r\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\r\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\r\n loss = tf.reduce_sum(per_example_loss)\r\n return (loss, per_example_loss, logits)\r\n\r\n\r\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\r\n num_train_steps, num_warmup_steps, use_tpu,\r\n use_one_hot_embeddings):\r\n def model_fn(features, labels, mode, params):\r\n tf.logging.info(\"*** Features ***\")\r\n for name in sorted(features.keys()):\r\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\r\n input_ids = features[\"input_ids\"]\r\n input_mask = features[\"input_mask\"]\r\n segment_ids = features[\"segment_ids\"]\r\n label_ids = features[\"label_ids\"]\r\n\r\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\r\n\r\n (total_loss, per_example_loss, logits) = create_model(\r\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\r\n num_labels, use_one_hot_embeddings)\r\n tvars = tf.trainable_variables()\r\n scaffold_fn = None\r\n if init_checkpoint:\r\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\r\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n if use_tpu:\r\n def tpu_scaffold():\r\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n return tf.train.Scaffold()\r\n scaffold_fn = tpu_scaffold\r\n else:\r\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n tf.logging.info(\"**** Trainable Variables ****\")\r\n\r\n for var in tvars:\r\n init_string = \"\"\r\n if var.name in initialized_variable_names:\r\n init_string = \", *INIT_FROM_CKPT*\"\r\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\r\n init_string)\r\n output_spec = None\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n train_op = 
optimization.create_optimizer(\r\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\r\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\r\n mode=mode,\r\n loss=total_loss,\r\n train_op=train_op,\r\n scaffold_fn=scaffold_fn)\r\n elif mode == tf.estimator.ModeKeys.EVAL:\r\n\r\n def metric_fn(per_example_loss, label_ids, logits):\r\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\r\n precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average=\"macro\")\r\n recall = tf_metrics.recall(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average=\"macro\")\r\n f = tf_metrics.f1(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average=\"macro\")\r\n loss = tf.metrics.mean(per_example_loss)\r\n return {\r\n \"eval_precision\":precision,\r\n \"eval_recall\":recall,\r\n \"eval_f\": f,\r\n \"eval_loss\": loss,\r\n }\r\n\r\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\r\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\r\n mode=mode,\r\n loss=total_loss,\r\n eval_metrics=eval_metrics,\r\n scaffold_fn=scaffold_fn)\r\n else:\r\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\r\n\r\n return output_spec\r\n\r\n return model_fn\r\n\r\n\r\ndef main(_):\r\n tf.logging.set_verbosity(tf.logging.INFO)\r\n processors = {\r\n \"ner\": NerProcessor\r\n }\r\n if not FLAGS.do_train and not FLAGS.do_eval:\r\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\r\n\r\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\r\n\r\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\r\n raise ValueError(\r\n \"Cannot use sequence length %d because the BERT model \"\r\n \"was only trained up to sequence length %d\" %\r\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\r\n\r\n task_name = FLAGS.task_name.lower()\r\n if task_name not in processors:\r\n raise ValueError(\"Task not found: %s\" % (task_name))\r\n processor = 
processors[task_name]()\r\n\r\n label_list = processor.get_labels()\r\n\r\n tokenizer = tokenization.FullTokenizer(\r\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\r\n tpu_cluster_resolver = None\r\n if FLAGS.use_tpu and FLAGS.tpu_name:\r\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR'])\r\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\r\n run_config = tf.contrib.tpu.RunConfig(\r\n cluster=tpu_cluster_resolver,\r\n master=FLAGS.master,\r\n model_dir=FLAGS.output_dir,\r\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\r\n tpu_config=tf.contrib.tpu.TPUConfig(\r\n iterations_per_loop=FLAGS.iterations_per_loop,\r\n num_shards=FLAGS.num_tpu_cores,\r\n per_host_input_for_training=is_per_host))\r\n\r\n train_examples = None\r\n num_train_steps = None\r\n num_warmup_steps = None\r\n if FLAGS.do_train:\r\n train_examples = processor.get_train_examples(FLAGS.data_dir)\r\n num_train_steps = int(\r\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\r\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\r\n model_fn = model_fn_builder(\r\n bert_config=bert_config,\r\n num_labels=len(label_list)+1,\r\n init_checkpoint=FLAGS.init_checkpoint,\r\n learning_rate=FLAGS.learning_rate,\r\n num_train_steps=num_train_steps,\r\n num_warmup_steps=num_warmup_steps,\r\n use_tpu=FLAGS.use_tpu,\r\n use_one_hot_embeddings=FLAGS.use_tpu)\r\n estimator = tf.contrib.tpu.TPUEstimator(\r\n use_tpu=FLAGS.use_tpu,\r\n model_fn=model_fn,\r\n config=run_config,\r\n train_batch_size=FLAGS.train_batch_size,\r\n eval_batch_size=FLAGS.eval_batch_size)\r\n\r\n if FLAGS.do_train:\r\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\r\n filed_based_convert_examples_to_features(\r\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\r\n tf.logging.info(\"***** Running training *****\")\r\n tf.logging.info(\" Num examples = %d\", 
len(train_examples))\r\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\r\n tf.logging.info(\" Num steps = %d\", num_train_steps)\r\n train_input_fn = file_based_input_fn_builder(\r\n input_file=train_file,\r\n seq_length=FLAGS.max_seq_length,\r\n is_training=True,\r\n drop_remainder=True)\r\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\r\n if FLAGS.do_eval:\r\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\r\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\r\n filed_based_convert_examples_to_features(\r\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\r\n\r\n tf.logging.info(\"***** Running evaluation *****\")\r\n tf.logging.info(\" Num examples = %d\", len(eval_examples))\r\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\r\n eval_steps = None\r\n if FLAGS.use_tpu:\r\n eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)\r\n eval_drop_remainder = True if FLAGS.use_tpu else False\r\n eval_input_fn = file_based_input_fn_builder(\r\n input_file=eval_file,\r\n seq_length=FLAGS.max_seq_length,\r\n is_training=False,\r\n drop_remainder=eval_drop_remainder)\r\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\r\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\r\n with open(output_eval_file, \"w\") as writer:\r\n tf.logging.info(\"***** Eval results *****\")\r\n for key in sorted(result.keys()):\r\n tf.logging.info(\" %s = %s\", key, str(result[key]))\r\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n tf.app.run()\r\n\r\n\r\n" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.reshape", "tensorflow.logging.set_verbosity", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.one_hot", "tensorflow.reduce_sum", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.truncated_normal_initializer", "tensorflow.nn.log_softmax", "tensorflow.train.Features", "tensorflow.FixedLenFeature", "tensorflow.train.init_from_checkpoint", "tensorflow.python_io.TFRecordWriter", "tensorflow.train.Scaffold", "tensorflow.app.run", "tensorflow.parse_single_example", "tensorflow.zeros_initializer", "tensorflow.flags.DEFINE_string", "tensorflow.logging.info", "tensorflow.nn.bias_add", "tensorflow.trainable_variables", "tensorflow.to_int32", "tensorflow.argmax" ] ]
NSLS-II/bluesky
[ "b7d666e65cf4ef556fb46b744c33264c8e3f7507" ]
[ "bluesky/simulators.py" ]
[ "from warnings import warn\nfrom bluesky.utils import maybe_await\nfrom bluesky.preprocessors import print_summary_wrapper\nfrom bluesky.run_engine import call_in_bluesky_event_loop, in_bluesky_event_loop\nfrom .protocols import Checkable\n\n\ndef plot_raster_path(plan, x_motor, y_motor, ax=None, probe_size=None, lw=2):\n \"\"\"Plot the raster path for this plan\n\n Parameters\n ----------\n plan : iterable\n Must yield `Msg` objects and not be a co-routine\n\n x_motor, y_motor : str\n Names of the x and y motors\n\n ax : matplotlib.axes.Axes\n The axes to plot to, if none, make new figure + axes\n\n probe_size : float, optional\n If not None, use as radius of probe (in same units as motor positions)\n\n lw : float, optional\n Width of lines drawn between points\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib import collections as mcollections\n from matplotlib import patches as mpatches\n if ax is None:\n ax = plt.subplots()[1]\n ax.set_aspect('equal')\n\n cur_x = cur_y = None\n traj = []\n for msg in plan:\n cmd = msg.command\n if cmd == 'set':\n if msg.obj.name == x_motor:\n cur_x = msg.args[0]\n if msg.obj.name == y_motor:\n cur_y = msg.args[0]\n elif cmd == 'save':\n traj.append((cur_x, cur_y))\n\n x, y = zip(*traj)\n path, = ax.plot(x, y, marker='', linestyle='-', lw=lw)\n ax.set_xlabel(x_motor)\n ax.set_ylabel(y_motor)\n if probe_size is None:\n read_points = ax.scatter(x, y, marker='o', lw=lw)\n else:\n circles = [mpatches.Circle((_x, _y), probe_size,\n facecolor='black', alpha=0.5)\n for _x, _y in traj]\n\n read_points = mcollections.PatchCollection(circles,\n match_original=True)\n ax.add_collection(read_points)\n return {'path': path, 'events': read_points}\n\n\ndef summarize_plan(plan):\n \"\"\"Print summary of plan\n\n Prints a minimal version of the plan, showing only moves and\n where events are created.\n\n Parameters\n ----------\n plan : iterable\n Must yield `Msg` objects\n \"\"\"\n for msg in print_summary_wrapper(plan):\n 
...\n\n\nprint_summary = summarize_plan # back-compat\n\n\ndef check_limits(plan):\n \"\"\"Run check_limits_async in the RE\"\"\"\n if in_bluesky_event_loop():\n raise RuntimeError(\"Can't call check_limits() from within RE, use await check_limits_async() instead\")\n call_in_bluesky_event_loop(check_limits_async(plan))\n\n\nasync def check_limits_async(plan):\n \"\"\"\n Check that a plan will not move devices outside of their limits.\n\n Parameters\n ----------\n plan : iterable\n Must yield `Msg` objects\n \"\"\"\n ignore = []\n for msg in plan:\n obj = msg.obj\n if msg.command == 'set' and obj not in ignore:\n if isinstance(obj, Checkable):\n await maybe_await(obj.check_value(msg.args[0]))\n else:\n warn(f\"{obj.name} has no check_value() method\"\n f\" to check if {msg.args[0]} is within its limits.\")\n ignore.append(obj)\n" ]
[ [ "matplotlib.collections.PatchCollection", "matplotlib.patches.Circle", "matplotlib.pyplot.subplots" ] ]
bglick13/multi-agent-emergence-environments
[ "e02d66f0734d95470d15a4508ff369a75fa093a4" ]
[ "ma_planning/ma_policy.py" ]
[ "import numpy as np\n\nfrom collections import deque\nfrom typing import Union\nfrom torch import nn, FloatTensor, LongTensor\nfrom torch.functional import F\nfrom torch.optim import Adam\nfrom torch.nn import CrossEntropyLoss\n\nfrom mae_envs.envs import DraftState\nfrom mcts import SearchNode, SearchProblem\n\n\n\nclass SwarmAgent():\n def __init__(self, model, env):\n self.model = model\n self.env = env\n self.macro_action = None\n\n def set_action(self, action):\n self.macro_action = action\n\n def act(self):\n return self.macro_action\n\n\nclass CaptainAgent():\n def __init__(self, model, env, agents):\n self.model = model\n self.best_model = model\n self.env = env\n self.agents = agents\n self.solver = None\n\n def simulate(self):\n leaf = self.solver.rollout()\n value = self.evaluate_leaf(leaf)\n self.solver.backup(leaf, value)\n return leaf\n\n def get_action(self, obs, num_reads=100, action=-1, random=False):\n if self.solver is None:\n self.root = SearchNode(obs, action)\n self.solver = SearchProblem(self.root)\n else:\n self.root = SearchNode(obs, action, self.root)\n self.solver.root = self.root\n\n leafs = []\n for _ in range(num_reads):\n leafs.append(self.simulate())\n\n action, value, values = self.root.best_child()\n successor, _, _, _ = env.step(action)\n nn_probs, nn_value = self.get_preds(successor)\n p = F.softmax(FloatTensor(values), -1).numpy()\n if random:\n action = np.random.choice(range(len(values)), p=p)\n else:\n top5 = values.argsort()[-5:]\n _p = F.softmax(FloatTensor(values[top5]), -1).numpy()\n action = np.random.choice(top5, p=_p)\n return action, values, p, nn_value, leafs\n\n def get_preds(self, obs):\n s_in = torch.FloatTensor(obs)\n s_in.requires_grad = False\n encoded_s = self.model.forward(s_in)\n probs = self.model.get_next_action_output(encoded_s) # n_agents x 3 x 11\n probs = F.softmax(torch.FloatTensor(probs), dim=2).detach().cpu().numpy()\n value = 
F.softmax(self.model.get_value_output(encoded_s)).detach().cpu().numpy()\n return probs, value\n\n def evaluate_leaf(self, leaf):\n probs, value = self.get_preds(leaf)\n if not leaf.is_terminal:\n leaf.expand(probs)\n return value" ]
[ [ "torch.FloatTensor", "numpy.random.choice" ] ]
evamariaa/Eureka
[ "a3e739a528fbe85ec588bca996188765649b7778" ]
[ "eureka/S3_data_reduction/nirspec.py" ]
[ "# NIRSpec specific rountines go here\nimport os\nimport numpy as np\nfrom astropy.io import fits\nfrom . import sigrej, background, nircam\nfrom . import bright2flux as b2f\n\ndef read(filename, data, meta):\n '''Reads single FITS file from JWST's NIRCam instrument.\n\n Parameters\n ----------\n filename: str\n Single filename to read\n data: DataClass\n The data object in which the fits data will stored\n meta: MetaClass\n The metadata object\n\n Returns\n -------\n data: DataClass\n The updated data object with the fits data stored inside\n\n Notes\n -----\n History:\n\n - November 2012 Kevin Stevenson\n Initial version\n - June 2021 Aarynn Carter/Eva-Maria Ahrer\n Updated for NIRSpec\n '''\n\n assert isinstance(filename, str)\n\n # Decide whether to perform the Stage 2 processing ourselves.\n # if stage2_processing:\n # \t# Run pipeline on a *_rateints.fits Stage 1 data product, but avoiding significant subarray trimming.\n # \tstage2_filename = process_to_stage2(filename, do_assignwcs=do_assignwcs, do_extract2d=do_extract2d, do_srctype=do_srctype, do_flatfield=do_flatfield, do_photom=do_photom, delete_files=delete_files)\n # else:\n # \t# Use the input file as is.\n # \tstage2_filename = filename\n\n\n # Now we can start working with the data.\n hdulist \t\t= fits.open(filename)\n data.mhdr \t\t= hdulist[0].header\n data.shdr \t\t= hdulist['SCI',1].header\n\n data.intstart \t= 1\n print(' WARNING: Manually setting INTSTART to 1 for NIRSpec CV3 data.')\n #data.intstart = data.mhdr['INTSTART']\n data.intend \t= data.mhdr['NINTS']\n\n data.data \t\t= hdulist['SCI',1].data\n data.err \t\t= hdulist['ERR',1].data\n data.dq \t\t= hdulist['DQ',1].data\n data.wave \t\t= hdulist['WAVELENGTH',1].data\n data.v0 \t\t= hdulist['VAR_RNOISE',1].data\n data.int_times\t= hdulist['INT_TIMES',1].data[data.intstart-1:data.intend]\n\n # Record integration mid-times in BJD_TDB\n # data.bjdtdb = data.int_times['int_mid_BJD_TDB']\n # There is no time information in the simulated 
NIRSpec data\n print(' WARNING: The timestamps for the simulated NIRSpec data are currently '\n 'hardcoded because they are not in the .fits files themselves')\n data.bjdtdb = np.linspace(data.mhdr['EXPSTART'], data.mhdr['EXPEND'], data.intend)\n\n # NIRSpec CV3 data has a lot of NaNs in the data and err arrays, which is making life difficult.\n print(' WARNING: Manually changing NaNs from DATA and ERR arrays to 0 for the CV3 data')\n data.err[np.where(np.isnan(data.err))] = np.inf\n data.data[np.where(np.isnan(data.data))] = 0\n\n return data, meta\n\n\ndef flag_bg(data, meta):\n '''Outlier rejection of sky background along time axis.\n\n Uses the code written for NIRCam and untested for NIRSpec, but likely to still work\n\n Parameters\n ----------\n data: DataClass\n The data object in which the fits data will stored\n meta: MetaClass\n The metadata object\n\n Returns\n -------\n data: DataClass\n The updated data object with outlier background pixels flagged.\n '''\n return nircam.flag_bg(data, meta)\n\n\ndef fit_bg(data, meta, mask, y1, y2, bg_deg, p3thresh, n, isplots=False):\n '''Fit for a non-uniform background.\n\n Uses the code written for NIRCam and untested for NIRSpec, but likely to still work\n '''\n return nircam.fit_bg(data, meta, mask, y1, y2, bg_deg, p3thresh, n, isplots=isplots)\n" ]
[ [ "numpy.linspace", "numpy.isnan" ] ]
lvwj19/PPR-Net-
[ "e5d305b39a1fa453fb3f58ed51468008e7bfa5a3" ]
[ "pprnet/utils/visualize_util.py" ]
[ "import os\nimport numpy as np\nimport random\nimport h5py\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, '..'))\nimport show3d_balls \n\ndef show_points(point_array, color_array=None, radius=3):\n assert isinstance(point_array, list)\n all_color = None\n if color_array is not None:\n if color_array == 'random':\n color_array = [np.array([random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]) for i in range(len(point_array))]\n assert len(point_array) == len(color_array)\n all_color = [ np.zeros( [ pnts.shape[0] ,3] ) for pnts in point_array]\n \n for i, c in enumerate(color_array):\n c=c.tolist()\n all_color[i][:] = [c[1],c[0],c[2]]\n all_color = np.concatenate(all_color, axis=0)\n all_points = np.concatenate(point_array, axis=0)\n show3d_balls.showpoints(all_points, c_gt=all_color, ballradius=radius)\n\ndef show_models(model_pc, trans, rot_mat, cls_idx, color_array=None, radius=3):\n assert len(trans) == len(rot_mat) == len(cls_idx)\n all_points = []\n all_color = [] if color_array is not None else None\n if color_array == 'random':\n color_array = [ [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)] for i in range(len(cls_idx))]\n for i in range(len(cls_idx)):\n model_pc_transformed = np.dot(model_pc[cls_idx[i]], rot_mat[i].T) + \\\n np.tile(np.reshape(trans[i], [1, 3]), [model_pc[cls_idx[i]].shape[0], 1])\n all_points.append(model_pc_transformed)\n colors_tmp = np.tile(np.array(color_array[i]).reshape(1,3).astype(np.float32), [model_pc_transformed.shape[0], 1])\n if all_color is not None:\n all_color.append(colors_tmp)\n \n all_points = np.concatenate(all_points, axis=0)\n if all_color is not None:\n all_color = np.concatenate(all_color, axis=0)\n show3d_balls.showpoints(all_points, c_gt=all_color, ballradius=radius)\n\ndef get_models_points(model_pc, trans, rot_mat, cls_idx):\n assert len(trans) == len(rot_mat) 
== len(cls_idx)\n all_points = []\n for i in range(len(cls_idx)):\n model_pc_transformed = np.dot(model_pc[cls_idx[i]], rot_mat[i].T) + \\\n np.tile(np.reshape(trans[i], [1, 3]), [model_pc[cls_idx[i]].shape[0], 1])\n all_points.append(model_pc_transformed)\n all_points = np.concatenate(all_points, axis=0)\n return all_points\n\n \n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.reshape", "numpy.array", "numpy.concatenate" ] ]
vgutta/AMPL
[ "46759aa84fd6acfc14facad0e14cb05a43d2e309" ]
[ "atomsci/ddm/pipeline/model_wrapper.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nContains class ModelWrapper and its subclasses, which are wrappers for DeepChem and scikit-learn model classes.\n\"\"\"\n\nimport logging\nimport os\nimport shutil\nimport joblib\nimport pdb\n\nimport deepchem as dc\nimport numpy as np\nimport tensorflow as tf\nif dc.__version__.startswith('2.1'):\n from deepchem.models.tensorgraph.fcnet import MultitaskRegressor, MultitaskClassifier\nelse:\n from deepchem.models.fcnet import MultitaskRegressor, MultitaskClassifier\nfrom collections import OrderedDict\nimport torch\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import RandomForestRegressor\n\ntry:\n import xgboost as xgb\n xgboost_supported = True\nexcept ImportError:\n xgboost_supported = False\n\nimport pickle\nimport yaml\nimport glob\nfrom datetime import datetime\nimport time\nimport socket\nfrom packaging import version\n\nfrom atomsci.ddm.utils import datastore_functions as dsf\nfrom atomsci.ddm.utils import llnl_utils\nfrom atomsci.ddm.pipeline import transformations as trans\nfrom atomsci.ddm.pipeline import perf_data as perf\n\nlogging.basicConfig(format='%(asctime)-15s %(message)s')\n\ndef dc_restore(model, checkpoint=None, model_dir=None, session=None):\n \"\"\"Reload the values of all variables from a checkpoint file.\n\n copied from DeepChem 2.3 keras_model.py to silence warnings caused\n when a model is loaded in inference mode.\n\n Args:\n model (DeepChem.KerasModel: keras model to restore\n\n checkpoint (str): the path to the checkpoint file to load. If this is None, the most recent\n checkpoint will be chosen automatically. Call get_checkpoints() to get a\n list of all available checkpoints.\n\n model_dir (str): default None\n Directory to restore checkpoint from. If None, use model.model_dir.\n\n session (tf.Session()) default None\n Session to run restore ops under. 
If None, model.session is used.\n\n Returns:\n None\n \"\"\"\n model._ensure_built()\n if model_dir is None:\n model_dir = model.model_dir\n if checkpoint is None:\n checkpoint = tf.train.latest_checkpoint(model_dir)\n if checkpoint is None:\n raise ValueError('No checkpoint found')\n if tf.executing_eagerly():\n # expect_partial() silences warnings when this model is restored for\n # inference only.\n model._checkpoint.restore(checkpoint).expect_partial()\n else:\n if session is None:\n session = model.session\n # expect_partial() silences warnings when this model is restored for\n # inference only.\n model._checkpoint.restore(checkpoint).expect_partial().run_restore_ops(session)\n\n# ****************************************************************************************\ndef create_model_wrapper(params, featurizer, ds_client=None):\n \"\"\"Factory function for creating Model objects of the correct subclass for params.model_type.\n\n Args:\n params (Namespace): Parameters passed to the model pipeline\n\n featurizer (Featurization): Object managing the featurization of compounds\n\n ds_client (DatastoreClient): Interface to the file datastore\n\n Returns:\n model (pipeline.Model): Wrapper for DeepChem, sklearn or other model.\n\n Raises:\n ValueError: Only params.model_type = 'NN', 'RF' or 'xgboost' is supported.\n \"\"\"\n if params.model_type == 'NN':\n return DCNNModelWrapper(params, featurizer, ds_client)\n elif params.model_type == 'RF':\n return DCRFModelWrapper(params, featurizer, ds_client)\n elif params.model_type == 'xgboost':\n if not xgboost_supported:\n raise Exception(\"Unable to import xgboost. \\\n xgboost package needs to be installed to use xgboost model. 
\\\n Installatin: \\\n from pip: pip3 install xgboost==0.90.\\\n livermore compute (lc): /usr/mic/bio/anaconda3/bin/pip install xgboost==0.90 --user \\\n twintron-blue (TTB): /opt/conda/bin/pip install xgboost==0.90 --user/ \\ \"\n )\n elif version.parse(xgb.__version__) < version.parse('0.9'):\n raise Exception(f\"xgboost required to be = 0.9 for GPU support. \\\n current version = xgb.__version__ \\\n installation: \\\n from pip: pip install xgboost==0.90\")\n else:\n return DCxgboostModelWrapper(params, featurizer, ds_client)\n elif params.model_type == 'hybrid':\n return HybridModelWrapper(params, featurizer, ds_client)\n else:\n raise ValueError(\"Unknown model_type %s\" % params.model_type)\n\n# ****************************************************************************************\n\nclass ModelWrapper(object):\n \"\"\"Wrapper for DeepChem and sklearn model objects. Provides methods to train and test a model,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n set in setup_model_dirs:\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n\n \"\"\"\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes ModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurization object): initialized outside of model_wrapper\n\n ds_client (DatastoreClient): Interface to the file datastore\n\n Side effects:\n Sets the following attributes of ModelWrapper:\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n \"\"\"\n self.params = params\n self.featurization = featurizer\n self.ds_client = ds_client\n self.log = logging.getLogger('ATOM')\n self.output_dir = self.params.output_dir\n self.model_dir = os.path.join(self.output_dir, 'model')\n os.makedirs(self.model_dir, exist_ok=True)\n self.transformers = []\n self.transformers_x = []\n\n # ****************************************************************************************\n\n def setup_model_dirs(self):\n \"\"\"Sets up paths and directories for persisting models at particular training epochs, used by\n the DeepChem model classes.\n\n Side effects:\n Sets the following attributes of ModelWrapper:\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n \"\"\"\n self.best_model_dir = os.path.join(self.output_dir, 'best_model')\n\n # ****************************************************************************************\n\n def train(self, pipeline):\n \"\"\"Trains a model (for multiple epochs if applicable), and saves the tuned model.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Raises:\n NotImplementedError: The method is implemented by subclasses\n \"\"\"\n raise NotImplementedError\n\n # ****************************************************************************************\n\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to the model type.\n\n Raises:\n NotImplementedError: The method is implemented by subclasses\n \"\"\"\n raise NotImplementedError\n # ****************************************************************************************\n\n def create_transformers(self, model_dataset):\n \"\"\"\n Initialize transformers for responses and/or features, and persist them for later.\n\n Args:\n model_dataset: The ModelDataset object that handles the current dataset\n\n Side effects\n Overwrites the attributes:\n transformers: A list of deepchem transformation objects on response_col, only if conditions are met\n\n transformers_x: A list of deepchem transformation objects on featurizers, only if conditions are met.\n\n params.transformer_key: A string pointing to the dataset key containing the transformer in the datastore, or the path to the transformer\n\n \"\"\"\n # TODO: Just a warning, we may have response transformers for classification datasets in the future\n if self.params.prediction_type=='regression' and self.params.transformers==True:\n # self.transformers = [\n # dc.trans.NormalizationTransformer(transform_y=True, dataset=model_dataset.dataset)]\n if self.params.model_type != \"hybrid\":\n self.transformers = 
[trans.NormalizationTransformerMissingData(transform_y=True, dataset=model_dataset.dataset)]\n else:\n self.transformers = [trans.NormalizationTransformerHybrid(transform_y=True, dataset=model_dataset.dataset)]\n\n # Set up transformers for features, if needed\n self.transformers_x = trans.create_feature_transformers(self.params, model_dataset)\n\n if len(self.transformers) > 0 or len(self.transformers_x) > 0:\n\n # Transformers are no longer saved as separate datastore objects; they are included in the model tarball\n self.params.transformer_key = os.path.join(self.output_dir, 'transformers.pkl')\n pickle.dump((self.transformers, self.transformers_x), open(self.params.transformer_key, 'wb'))\n self.log.info(\"Wrote transformers to %s\" % self.params.transformer_key)\n self.params.transformer_oid = \"\"\n self.params.transformer_bucket = \"\"\n\n # ****************************************************************************************\n\n def reload_transformers(self):\n \"\"\"\n Load response and feature transformers from datastore objects or files. 
Before AMPL v1.2 these\n were persisted as separate datastore objects when the model tracker was used; subsequently they\n are included in model tarballs, which should have been unpacked before this function gets called.\n \"\"\"\n\n # Try local path first to check for transformers unpacked from model tarball\n if not trans.transformers_needed(self.params):\n return\n local_path = f\"{self.output_dir}/transformers.pkl\"\n if os.path.exists(local_path):\n self.log.info(f\"Reloading transformers from model tarball {local_path}\")\n self.transformers, self.transformers_x = pickle.load(open(local_path, 'rb'))\n else:\n if self.params.transformer_key is not None:\n if self.params.save_results:\n self.log.info(f\"Reloading transformers from datastore key {self.params.transformer_key}\")\n self.transformers, self.transformers_x = dsf.retrieve_dataset_by_datasetkey(\n dataset_key = self.params.transformer_key,\n bucket = self.params.transformer_bucket,\n client = self.ds_client )\n else:\n self.log.info(f\"Reloading transformers from file {self.params.transformer_key}\")\n self.transformers, self.transformers_x = pickle.load(open( self.params.transformer_key, 'rb' ))\n else:\n # Shouldn't happen\n raise Exception(\"Transformers needed to reload model, but no transformer_key specified.\")\n\n\n # ****************************************************************************************\n\n def transform_dataset(self, dataset):\n \"\"\"\n Transform the responses and/or features in the given DeepChem dataset using the current transformers.\n\n Args:\n dataset: The DeepChem DiskDataset that contains a dataset\n\n Returns:\n transformed_dataset: The transformed DeepChem DiskDataset\n\n \"\"\"\n transformed_dataset = dataset\n if len(self.transformers) > 0:\n self.log.info(\"Transforming response data\")\n for transformer in self.transformers:\n transformed_dataset = transformer.transform(transformed_dataset)\n if len(self.transformers_x) > 0:\n self.log.info(\"Transforming 
feature data\")\n for transformer in self.transformers_x:\n transformed_dataset = transformer.transform(transformed_dataset)\n\n return transformed_dataset\n # ****************************************************************************************\n\n def get_num_features(self):\n \"\"\"Returns the number of dimensions of the feature space, taking both featurization method\n and transformers into account.\n \"\"\"\n if self.params.feature_transform_type == 'umap':\n return self.params.umap_dim\n else:\n return self.featurization.get_feature_count()\n\n # ****************************************************************************************\n\n def get_train_valid_pred_results(self, perf_data):\n \"\"\"Returns predicted values and metrics for the training, validation or test set\n associated with the PerfData object perf_data. Results are returned as a dictionary \n of parameter, value pairs in the format expected by the model tracker.\n\n Args:\n perf_data: A PerfData object that stores the predicted values and metrics\n\n Returns:\n dict: A dictionary of the prediction results\n\n \"\"\"\n return perf_data.get_prediction_results()\n\n # ****************************************************************************************\n def get_test_perf_data(self, model_dir, model_dataset):\n \"\"\"Returns the predicted values and metrics for the current test dataset against\n the version of the model stored in model_dir, as a PerfData object.\n\n Args:\n model_dir (str): Directory where the saved model is stored\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n perf_data: PerfData object containing the predicted values and metrics for the current test dataset\n \"\"\"\n # Load the saved model from model_dir\n self.reload_model(model_dir)\n\n # Create a PerfData object, which knows how to format the prediction results in the structure\n # expected by the model tracker.\n\n # We pass transformed=False to indicate that the preds 
and uncertainties we get from\n # generate_predictions are already untransformed, so that perf_data.get_prediction_results()\n # doesn't untransform them again.\n if hasattr(self.transformers[0], \"ishybrid\"):\n # indicate that we are training a hybrid model\n perf_data = perf.create_perf_data(\"hybrid\", model_dataset, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)\n else:\n perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'test', transformed=False)\n test_dset = model_dataset.test_dset\n test_preds, test_stds = self.generate_predictions(test_dset)\n _ = perf_data.accumulate_preds(test_preds, test_dset.ids, test_stds)\n return perf_data\n\n # ****************************************************************************************\n def get_test_pred_results(self, model_dir, model_dataset):\n \"\"\"Returns predicted values and metrics for the current test dataset against the version\n of the model stored in model_dir, as a dictionary in the format expected by the model tracker.\n\n Args:\n model_dir (str): Directory where the saved model is stored\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n dict: A dictionary containing the prediction values and metrics for the current dataset.\n \"\"\"\n perf_data = self.get_test_perf_data(model_dir, model_dataset)\n return perf_data.get_prediction_results()\n\n # ****************************************************************************************\n def get_full_dataset_perf_data(self, model_dataset):\n \"\"\"Returns the predicted values and metrics from the current model for the full current dataset,\n as a PerfData object.\n\n Args:\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n perf_data: PerfData object containing the predicted values and metrics for the current full dataset\n \"\"\"\n\n # Create a PerfData 
object, which knows how to format the prediction results in the structure\n # expected by the model tracker.\n\n # We pass transformed=False to indicate that the preds and uncertainties we get from\n # generate_predictions are already untransformed, so that perf_data.get_prediction_results()\n # doesn't untransform them again.\n if hasattr(self.transformers[0], \"ishybrid\"):\n # indicate that we are training a hybrid model\n perf_data = perf.create_perf_data(\"hybrid\", model_dataset, self.transformers, 'full', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)\n else:\n perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'full', transformed=False)\n full_preds, full_stds = self.generate_predictions(model_dataset.dataset)\n _ = perf_data.accumulate_preds(full_preds, model_dataset.dataset.ids, full_stds)\n return perf_data\n\n # ****************************************************************************************\n def get_full_dataset_pred_results(self, model_dataset):\n \"\"\"Returns predicted values and metrics from the current model for the full current dataset,\n as a dictionary in the format expected by the model tracker.\n\n Args:\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n dict: A dictionary containing predicted values and metrics for the current full dataset\n\n \"\"\"\n self.data = model_dataset\n perf_data = self.get_full_dataset_perf_data(model_dataset)\n return perf_data.get_prediction_results()\n\n def generate_predictions(self, dataset):\n \"\"\"\n\n Args:\n dataset:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n def reload_model(self, reload_dir):\n \"\"\"\n\n Args:\n reload_dir:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n\n # ****************************************************************************************\n def model_save(self):\n \"\"\"A wrapper function to save a model due to the 
`DeepChem model.save()` has inconsistent implementation.\n\n The `SKlearnModel()` class and xgboost model in DeepChem use `model.save()`,\n while the `MultitaskRegressor` class uses `model.save_checkpoint()`. The\n workaround is to try `model.save()` first. If failed, then try `model.save_checkpoint()`\n \"\"\"\n try:\n self.model.save()\n except Exception as error:\n try:\n self.model.save_checkpoint()\n except Exception as e:\n self.log.error(\"Error when saving model:\\n%s\" % str(e))\n\n\n# ****************************************************************************************\nclass DCNNModelWrapper(ModelWrapper):\n \"\"\"Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n\n g: The tensorflow graph object\n\n sess: The tensor flow graph session\n\n model: The dc.models.GraphConvModel, MultitaskRegressor, or MultitaskClassifier object, as specified by the params attribute\n\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (np.array of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (np.array of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the training dataset\n\n valid_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the validation dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes DCNNModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurizer object): initialized outside of model_wrapper\n\n Side effects:\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n g: The tensorflow graph object\n\n sess: The tensor flow graph session\n\n model: The dc.models.GraphConvModel, MultitaskRegressor, or MultitaskClassifier object, as specified by the params 
attribute\n\n\n \"\"\"\n super().__init__(params, featurizer, ds_client)\n self.g = tf.Graph()\n self.sess = tf.compat.v1.Session(graph=self.g)\n n_features = self.get_num_features()\n self.num_epochs_trained = 0\n\n if self.params.featurizer == 'graphconv':\n\n # Set defaults for layer sizes and dropouts, if not specified by caller. Note that\n # these depend on the featurizer used.\n\n if self.params.layer_sizes is None:\n self.params.layer_sizes = [64, 64, 128]\n if self.params.dropouts is None:\n if self.params.uncertainty:\n self.params.dropouts = [0.25] * len(self.params.layer_sizes)\n else:\n self.params.dropouts = [0.0] * len(self.params.layer_sizes)\n\n # TODO: Need to check that GraphConvModel params are actually being used\n self.model = dc.models.GraphConvModel(\n self.params.num_model_tasks,\n batch_size=self.params.batch_size,\n learning_rate=self.params.learning_rate,\n learning_rate_decay_time=1000,\n optimizer_type=self.params.optimizer_type,\n beta1=0.9,\n beta2=0.999,\n model_dir=self.model_dir,\n mode=self.params.prediction_type,\n tensorboard=False,\n uncertainty=self.params.uncertainty,\n graph_conv_layers=self.params.layer_sizes[:-1],\n dense_layer_size=self.params.layer_sizes[-1],\n dropout=self.params.dropouts,\n penalty=self.params.weight_decay_penalty,\n penalty_type=self.params.weight_decay_penalty_type)\n\n else:\n # Set defaults for layer sizes and dropouts, if not specified by caller. 
Note that\n # default layer sizes depend on the featurizer used.\n\n if self.params.layer_sizes is None:\n if self.params.featurizer == 'ecfp':\n self.params.layer_sizes = [1000, 500]\n elif self.params.featurizer in ['descriptors', 'computed_descriptors']:\n self.params.layer_sizes = [200, 100]\n else:\n # Shouldn't happen\n self.log.warning(\"You need to define default layer sizes for featurizer %s\" %\n self.params.featurizer)\n self.params.layer_sizes = [1000, 500]\n\n if self.params.dropouts is None:\n self.params.dropouts = [0.4] * len(self.params.layer_sizes)\n if self.params.weight_init_stddevs is None:\n self.params.weight_init_stddevs = [0.02] * len(self.params.layer_sizes)\n if self.params.bias_init_consts is None:\n self.params.bias_init_consts = [1.0] * len(self.params.layer_sizes)\n\n if self.params.prediction_type == 'regression':\n\n # TODO: Need to check that MultitaskRegressor params are actually being used\n self.model = MultitaskRegressor(\n self.params.num_model_tasks,\n n_features,\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n learning_rate=self.params.learning_rate,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n batch_size=self.params.batch_size,\n seed=123,\n verbosity='low',\n model_dir=self.model_dir,\n learning_rate_decay_time=1000,\n beta1=0.9,\n beta2=0.999,\n mode=self.params.prediction_type,\n tensorboard=False,\n uncertainty=self.params.uncertainty)\n\n # print(\"JEA debug\",self.params.num_model_tasks,n_features,self.params.layer_sizes,self.params.weight_init_stddevs,self.params.bias_init_consts,self.params.dropouts,self.params.weight_decay_penalty,self.params.weight_decay_penalty_type,self.params.batch_size,self.params.learning_rate)\n # self.model = MultitaskRegressor(\n # self.params.num_model_tasks,\n # n_features,\n # 
layer_sizes=self.params.layer_sizes,\n # weight_init_stddevs=self.params.weight_init_stddevs,\n # bias_init_consts=self.params.bias_init_consts,\n # dropouts=self.params.dropouts,\n # weight_decay_penalty=self.params.weight_decay_penalty,\n # weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n # batch_size=self.params.batch_size,\n # learning_rate=self.params.learning_rate,\n # seed=123)\n\n else:\n # TODO: Need to check that MultitaskClassifier params are actually being used\n self.model = MultitaskClassifier(\n self.params.num_model_tasks,\n n_features,\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n learning_rate=self.params.learning_rate,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n batch_size=self.params.batch_size,\n seed=123,\n verbosity='low',\n model_dir=self.model_dir,\n learning_rate_decay_time=1000,\n beta1=.9,\n beta2=.999,\n mode=self.params.prediction_type,\n tensorboard=False,\n n_classes=self.params.class_number)\n\n # ****************************************************************************************\n def recreate_model(self):\n \"\"\"\n Creates a new DeepChem Model object of the correct type for the requested featurizer and prediction type \n and returns it.\n \"\"\"\n if self.params.featurizer == 'graphconv':\n model = dc.models.GraphConvModel(\n self.params.num_model_tasks,\n batch_size=self.params.batch_size,\n learning_rate=self.params.learning_rate,\n learning_rate_decay_time=1000,\n optimizer_type=self.params.optimizer_type,\n beta1=0.9,\n beta2=0.999,\n model_dir=self.model_dir,\n mode=self.params.prediction_type,\n tensorboard=False,\n uncertainty=self.params.uncertainty,\n graph_conv_layers=self.params.layer_sizes[:-1],\n dense_layer_size=self.params.layer_sizes[-1],\n dropout=self.params.dropouts,\n 
penalty=self.params.weight_decay_penalty,\n penalty_type=self.params.weight_decay_penalty_type)\n\n else:\n n_features = self.get_num_features()\n if self.params.prediction_type == 'regression':\n model = MultitaskRegressor(\n self.params.num_model_tasks,\n n_features,\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n learning_rate=self.params.learning_rate,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n batch_size=self.params.batch_size,\n seed=123,\n verbosity='low',\n model_dir=self.model_dir,\n learning_rate_decay_time=1000,\n beta1=0.9,\n beta2=0.999,\n mode=self.params.prediction_type,\n tensorboard=False,\n uncertainty=self.params.uncertainty)\n else:\n model = MultitaskClassifier(\n self.params.num_model_tasks,\n n_features,\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n learning_rate=self.params.learning_rate,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n batch_size=self.params.batch_size,\n seed=123,\n verbosity='low',\n model_dir=self.model_dir,\n learning_rate_decay_time=1000,\n beta1=.9,\n beta2=.999,\n mode=self.params.prediction_type,\n tensorboard=False,\n n_classes=self.params.class_number)\n\n return model\n\n # ****************************************************************************************\n def train(self, pipeline):\n \"\"\"Trains a neural net model for multiple epochs, choose the epoch with the best validation\n set performance, refits the model for that number of epochs, and saves the tuned model.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Side effects:\n Sets the following 
attributes for DCNNModelWrapper:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (list of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (list of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the training dataset\n\n valid_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the validation dataset\n \"\"\"\n # TODO: Fix docstrings above\n num_folds = len(pipeline.data.train_valid_dsets)\n if num_folds > 1:\n self.train_kfold_cv(pipeline)\n else:\n self.train_with_early_stopping(pipeline)\n\n # ****************************************************************************************\n def train_with_early_stopping(self, pipeline):\n \"\"\"Trains a neural net model for up to self.params.max_epochs epochs, while tracking the validation\n set metric given by params.model_choice_score_type. Saves a model checkpoint each time the metric\n is improved over its previous saved value by more than a threshold percentage. If the metric fails to\n improve for more than a specified 'patience' number of epochs, stop training and revert the model state\n to the last saved checkpoint. 
\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Side effects:\n Sets the following attributes for DCNNModelWrapper:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n best_validation_score (float): The best validation model choice score attained during training.\n\n train_perf_data (list of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (list of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array): A standard training set performance metric (r2_score or roc_auc), at the end of each epoch.\n\n valid_epoch_perfs (np.array): A standard validation set performance metric (r2_score or roc_auc), at the end of each epoch.\n \"\"\"\n self.data = pipeline.data\n self.best_epoch = 0\n self.best_valid_score = None\n self.early_stopping_min_improvement = self.params.early_stopping_min_improvement\n self.early_stopping_patience = self.params.early_stopping_patience\n self.train_epoch_perfs = np.zeros(self.params.max_epochs)\n self.valid_epoch_perfs = np.zeros(self.params.max_epochs)\n self.test_epoch_perfs = np.zeros(self.params.max_epochs)\n self.train_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.valid_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.test_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.model_choice_scores = np.zeros(self.params.max_epochs)\n\n self.train_perf_data = []\n self.valid_perf_data = []\n self.test_perf_data = []\n\n for ei in range(self.params.max_epochs):\n self.train_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'train'))\n self.valid_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 
'valid'))\n self.test_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test'))\n\n test_dset = pipeline.data.test_dset\n\n time_limit = int(self.params.slurm_time_limit)\n training_start = time.time()\n\n train_dset, valid_dset = pipeline.data.train_valid_dsets[0]\n for ei in range(self.params.max_epochs):\n if llnl_utils.is_lc_system() and (ei > 0):\n # If we're running on an LC system, check that we have enough time to complete another epoch\n # before the current job finishes, by extrapolating from the time elapsed so far.\n\n now = time.time() \n elapsed_time = now - pipeline.start_time\n training_time = now - training_start\n time_remaining = time_limit * 60 - elapsed_time\n time_needed = training_time/ei\n\n if time_needed > 0.9 * time_remaining:\n self.log.warn(\"Projected time to finish one more epoch exceeds time left in job; cutting training to %d epochs\" %\n ei)\n self.params.max_epochs = ei\n break\n\n # Train the model for one epoch. 
We turn off automatic checkpointing, so the last checkpoint\n # saved will be the one we created intentionally when we reached a new best validation score.\n self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0)\n train_pred = self.model.predict(train_dset, [])\n valid_pred = self.model.predict(valid_dset, [])\n test_pred = self.model.predict(test_dset, [])\n\n train_perf = self.train_perf_data[ei].accumulate_preds(train_pred, train_dset.ids)\n valid_perf = self.valid_perf_data[ei].accumulate_preds(valid_pred, valid_dset.ids)\n test_perf = self.test_perf_data[ei].accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n # Compute performance metrics for each subset, and check if we've reached a new best validation set score\n\n self.train_epoch_perfs[ei], _ = self.train_perf_data[ei].compute_perf_metrics()\n self.valid_epoch_perfs[ei], _ = self.valid_perf_data[ei].compute_perf_metrics()\n self.test_epoch_perfs[ei], _ = self.test_perf_data[ei].compute_perf_metrics()\n valid_score = self.valid_perf_data[ei].model_choice_score(self.params.model_choice_score_type)\n self.model_choice_scores[ei] = valid_score\n self.num_epochs_trained = ei + 1\n if self.best_valid_score is None:\n self.model.save_checkpoint()\n self.best_valid_score = valid_score\n self.best_epoch = ei\n elif valid_score - self.best_valid_score > self.early_stopping_min_improvement:\n # Save a new checkpoint\n self.model.save_checkpoint()\n self.best_valid_score = valid_score\n self.best_epoch = ei\n elif ei - self.best_epoch > self.early_stopping_patience:\n self.log.info(f\"No improvement after {self.early_stopping_patience} epochs, stopping training\")\n break\n\n # Revert to last checkpoint\n dc_restore(self.model)\n self.model_save()\n\n # Only copy the model files we need, not the entire directory\n 
self._copy_model(self.best_model_dir)\n self.log.info(f\"Best model from epoch {self.best_epoch} saved to {self.best_model_dir}\")\n\n\n\n # ****************************************************************************************\n def train_kfold_cv(self, pipeline):\n \"\"\"Trains a neural net model with K-fold cross-validation for a specified number of epochs.\n Finds the epoch with the best validation set performance averaged over folds, then refits \n a model for the same number of epochs to the combined training and validation data.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Side effects:\n Sets the following attributes for DCNNModelWrapper:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (list of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (list of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array): Contains a standard training set performance metric (r2_score or roc_auc), averaged over folds,\n at the end of each epoch.\n\n valid_epoch_perfs (np.array): Contains a standard validation set performance metric (r2_score or roc_auc), averaged over folds,\n at the end of each epoch.\n \"\"\"\n # TODO: Fix docstrings above\n num_folds = len(pipeline.data.train_valid_dsets)\n self.data = pipeline.data\n self.best_epoch = 0\n self.best_valid_score = None\n self.train_epoch_perfs = np.zeros(self.params.max_epochs)\n self.valid_epoch_perfs = np.zeros(self.params.max_epochs)\n self.test_epoch_perfs = np.zeros(self.params.max_epochs)\n self.train_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.valid_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.test_epoch_perf_stds = 
np.zeros(self.params.max_epochs)\n self.model_choice_scores = np.zeros(self.params.max_epochs)\n self.early_stopping_min_improvement = self.params.early_stopping_min_improvement\n self.early_stopping_patience = self.params.early_stopping_patience\n\n\n # Create PerfData structures for computing cross-validation metrics\n self.valid_perf_data = []\n for ei in range(self.params.max_epochs):\n self.valid_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid'))\n\n test_dset = pipeline.data.test_dset\n\n time_limit = int(self.params.slurm_time_limit)\n training_start = time.time()\n\n # Train a separate model for each fold\n models = []\n for k in range(num_folds):\n models.append(self.recreate_model())\n\n for ei in range(self.params.max_epochs):\n \n if llnl_utils.is_lc_system() and (ei > 0):\n # If we're running on an LC system, check that we have enough time to complete another epoch\n # across all folds, plus rerun the training, before the current job finishes, by \n # extrapolating from the time elapsed so far.\n \n now = time.time() \n elapsed_time = now - pipeline.start_time\n training_time = now - training_start\n time_remaining = time_limit * 60 - elapsed_time\n\n # epochs_remaining is how many epochs we have to run if we do one more across all folds,\n # then do self.best_epoch+1 epochs on the combined training & validation set, allowing for the\n # possibility that the next epoch may be the best one.\n\n epochs_remaining = ei + 2\n time_per_epoch = training_time/ei\n time_needed = epochs_remaining * time_per_epoch\n \n if time_needed > 0.9 * time_remaining:\n self.log.warn('Projected time to finish one more epoch exceeds time left in job; cutting training to %d epochs' % ei)\n self.params.max_epochs = ei\n break\n\n\n # Create PerfData structures that are only used within loop to compute metrics during initial training\n train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 
self.transformers, 'train')\n test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')\n for k in range(num_folds):\n self.model = models[k]\n train_dset, valid_dset = pipeline.data.train_valid_dsets[k]\n\n # We turn off automatic checkpointing - we only want to save a checkpoints for the final model.\n self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0, restore=False)\n train_pred = self.model.predict(train_dset, [])\n valid_pred = self.model.predict(valid_dset, [])\n test_pred = self.model.predict(test_dset, [])\n\n train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids)\n valid_perf = self.valid_perf_data[ei].accumulate_preds(valid_pred, valid_dset.ids)\n test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Fold %d, epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n k, ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n # Compute performance metrics for current epoch across validation sets for all folds, and update\n # the best_epoch and best score if the new score exceeds the previous best score by a specified\n # threshold.\n\n self.valid_epoch_perfs[ei], self.valid_epoch_perf_stds[ei] = self.valid_perf_data[ei].compute_perf_metrics()\n valid_score = self.valid_perf_data[ei].model_choice_score(self.params.model_choice_score_type)\n self.model_choice_scores[ei] = valid_score\n self.num_epochs_trained = ei + 1\n if self.best_valid_score is None:\n self.best_valid_score = valid_score\n self.best_epoch = ei\n self.log.info(f\"Total cross-validation score for epoch {ei} is {valid_score:.3}\")\n elif valid_score - self.best_valid_score > self.early_stopping_min_improvement:\n self.best_valid_score = valid_score\n self.best_epoch = ei\n self.log.info(f\"*** Total cross-validation score for epoch {ei} is {valid_score:.3}, is new maximum\")\n elif ei - self.best_epoch > 
self.early_stopping_patience:\n self.log.info(f\"No improvement after {self.early_stopping_patience} epochs, stopping training\")\n break\n else:\n self.log.info(f\"Total cross-validation score for epoch {ei} is {valid_score:.3}\")\n\n # Train a new model for best_epoch epochs on the combined training/validation set. Compute the training and test\n # set metrics at each epoch.\n fit_dataset = pipeline.data.combined_training_data()\n retrain_start = time.time()\n self.model = self.recreate_model()\n self.log.info(f\"Best epoch was {self.best_epoch}, retraining with combined training/validation set\")\n\n self.train_perf_data = []\n self.test_perf_data = []\n for ei in range(self.best_epoch+1):\n self.train_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'train_valid'))\n self.test_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test'))\n\n self.model.fit(fit_dataset, nb_epoch=1, checkpoint_interval=0, restore=False)\n train_pred = self.model.predict(fit_dataset, [])\n test_pred = self.model.predict(test_dset, [])\n train_perf = self.train_perf_data[ei].accumulate_preds(train_pred, fit_dataset.ids)\n test_perf = self.test_perf_data[ei].accumulate_preds(test_pred, test_dset.ids)\n self.log.info(f\"Combined folds: Epoch {ei}, training {pipeline.metric_type} = {train_perf:.3},\"\n + f\"test {pipeline.metric_type} = {test_perf:.3}\")\n self.train_epoch_perfs[ei], self.train_epoch_perf_stds[ei] = self.train_perf_data[ei].compute_perf_metrics()\n self.test_epoch_perfs[ei], self.test_epoch_perf_stds[ei] = self.test_perf_data[ei].compute_perf_metrics()\n self.model.save_checkpoint()\n self.model_save()\n\n # Only copy the model files we need, not the entire directory\n self._copy_model(self.best_model_dir)\n retrain_time = time.time() - retrain_start\n self.log.info(\"Time to retrain model for %d epochs: %.1f seconds, %.1f sec/epoch\" % (self.best_epoch, retrain_time, 
\n retrain_time/self.best_epoch))\n\n # ****************************************************************************************\n def _copy_model(self, dest_dir):\n \"\"\"Copies the files needed to recreate a DeepChem NN model from the current model\n directory to a destination directory.\n\n Args:\n dest_dir (str): The destination directory for the model files\n \"\"\"\n\n chkpt_file = os.path.join(self.model_dir, 'checkpoint')\n with open(chkpt_file, 'r') as chkpt_in:\n chkpt_dict = yaml.load(chkpt_in.read())\n chkpt_prefix = chkpt_dict['model_checkpoint_path']\n files = [chkpt_file]\n # files.append(os.path.join(self.model_dir, 'model.pickle'))\n files.append(os.path.join(self.model_dir, '%s.index' % chkpt_prefix))\n # files.append(os.path.join(self.model_dir, '%s.meta' % chkpt_prefix))\n files = files + glob.glob(os.path.join(self.model_dir, '%s.data-*' % chkpt_prefix))\n self._clean_up_excess_files(dest_dir)\n for file in files:\n shutil.copy2(file, dest_dir)\n self.log.info(\"Saved model files to '%s'\" % dest_dir)\n\n\n # ****************************************************************************************\n def reload_model(self, reload_dir):\n \"\"\"Loads a saved neural net model from the specified directory.\n\n Args:\n reload_dir (str): Directory where saved model is located.\n model_dataset (ModelDataset Object): contains the current full dataset\n\n Side effects:\n Resets the value of model, transformers, and transformers_x\n \"\"\"\n if self.params.featurizer == 'graphconv':\n self.model = dc.models.GraphConvModel(\n n_tasks=self.params.num_model_tasks,\n n_features=self.get_num_features(),\n batch_size=self.params.batch_size,\n model_dir=reload_dir,\n uncertainty=self.params.uncertainty,\n graph_conv_layers=self.params.layer_sizes[:-1],\n dense_layer_size=self.params.layer_sizes[-1],\n dropout=self.params.dropouts,\n learning_rate=self.params.learning_rate,\n mode=self.params.prediction_type)\n elif self.params.prediction_type == 'regression':\n 
self.model = MultitaskRegressor(\n self.params.num_model_tasks,\n n_features=self.get_num_features(),\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n model_dir=reload_dir,\n learning_rate=self.params.learning_rate,\n uncertainty=self.params.uncertainty)\n else:\n self.model = MultitaskClassifier(\n self.params.num_model_tasks,\n n_features=self.get_num_features(),\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n model_dir=reload_dir,\n learning_rate=self.params.learning_rate,\n n_classes=self.params.class_number)\n # Hack to run models trained in DeepChem 2.1 with DeepChem 2.2\n # self.model.default_outputs = self.model.outputs\n # Get latest checkpoint path transposed to current model dir\n ckpt = tf.train.get_checkpoint_state(reload_dir)\n if os.path.exists(f\"{ckpt.model_checkpoint_path}.index\"):\n checkpoint = ckpt.model_checkpoint_path\n else:\n checkpoint = os.path.join(reload_dir, os.path.basename(ckpt.model_checkpoint_path))\n dc_restore(self.model, checkpoint=checkpoint)\n\n\n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n\n\n # ****************************************************************************************\n def get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset. subset may be 'train', 'valid', 'test'\n accordingly. 
epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. Results are returned as a dictionary of parameter, value pairs.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n dict: A dictionary of parameter/ value pairs of the prediction values and results of the dataset subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n if subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data[epoch])\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data[epoch])\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data[epoch])\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset. subset may be 'train', 'valid', 'test' or 'full',\n epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. 
Results are returned as a PerfData object of the appropriate class \n for the model's split strategy and prediction type.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n PerfData object: Performance object pulled from the appropriate subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n\n if subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n\n if subset == 'train':\n return self.train_perf_data[epoch]\n elif subset == 'valid':\n return self.valid_perf_data[epoch]\n elif subset == 'test':\n #return self.get_test_perf_data(model_dir, self.data)\n return self.test_perf_data[epoch]\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified dataset with current model, as well as standard deviations\n if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): tuple of predictions for compounds and standard deviation estimates, if requested.\n Each element of tuple is a numpy array of shape (ncmpds, ntasks, nclasses), where nclasses = 1 for regression\n models.\n \"\"\"\n pred, std = None, None\n self.log.info(\"Predicting values for current model\")\n\n # For deepchem's predict_uncertainty function, you are not allowed to specify transformers. That means that the\n # predictions are being made in the transformed space, not the original space. 
We call undo_transforms() to generate\n # the transformed predictions. To transform the standard deviations, we rely on the fact that at present we only use\n # dc.trans.NormalizationTransformer (which centers and scales the data).\n\n # Uncertainty is now supported by DeepChem's GraphConv, at least for regression models.\n # if self.params.uncertainty and self.params.prediction_type == 'regression' and self.params.featurizer != 'graphconv':\n\n # Current (2.1) DeepChem neural net classification models don't support uncertainties.\n if self.params.uncertainty and self.params.prediction_type == 'classification':\n self.log.warning(\"Warning: DeepChem neural net models support uncertainty for regression only.\")\n \n if self.params.uncertainty and self.params.prediction_type == 'regression':\n # For multitask, predict_uncertainty returns a list of (pred, std) tuples, one for each task.\n # For singletask, it returns one tuple. Convert the result into a pair of ndarrays of shape (ncmpds, ntasks, nclasses).\n pred_std = self.model.predict_uncertainty(dataset)\n if type(pred_std) == tuple:\n #JEA\n #ntasks = 1\n ntasks = len(pred_std[0][0])\n pred, std = pred_std\n pred = pred.reshape((pred.shape[0], 1, pred.shape[1]))\n std = std.reshape(pred.shape)\n else:\n ntasks = len(pred_std)\n pred0, std0 = pred_std[0]\n ncmpds = pred0.shape[0]\n nclasses = pred0.shape[1]\n pred = np.concatenate([p.reshape((ncmpds, 1, nclasses)) for p, s in pred_std], axis=1)\n std = np.concatenate([s.reshape((ncmpds, 1, nclasses)) for p, s in pred_std], axis=1)\n\n if self.params.transformers and self.transformers is not None:\n # Transform the standard deviations, if we can. 
This is a bit of a hack, but it works for\n # NormalizationTransformer, since the standard deviations used to scale the data are\n # stored in the transformer object.\n if len(self.transformers) == 1 and (isinstance(self.transformers[0], dc.trans.NormalizationTransformer) or isinstance(self.transformers[0],trans.NormalizationTransformerMissingData)):\n y_stds = self.transformers[0].y_stds.reshape((1,ntasks,1))\n std = std / y_stds\n pred = dc.trans.undo_transforms(pred, self.transformers)\n elif self.params.transformers and self.transformers is not None:\n pred = self.model.predict(dataset, self.transformers)\n if self.params.prediction_type == 'regression':\n pred = pred.reshape((pred.shape[0], pred.shape[1], 1))\n else:\n pred = self.model.predict(dataset, [])\n if self.params.prediction_type == 'regression':\n pred = pred.reshape((pred.shape[0], pred.shape[1], 1))\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to neural network models.\n\n Returns:\n model_spec_metdata (dict): A dictionary of the parameter sets for the DCNNModelWrapper object.\n Parameters are saved under the key 'nn_specific' as a subdictionary.\n \"\"\"\n nn_metadata = dict(\n best_epoch = self.best_epoch,\n max_epochs = self.params.max_epochs,\n batch_size = self.params.batch_size,\n optimizer_type = self.params.optimizer_type,\n layer_sizes = self.params.layer_sizes,\n dropouts = self.params.dropouts,\n weight_init_stddevs = self.params.weight_init_stddevs,\n bias_init_consts = self.params.bias_init_consts,\n learning_rate = self.params.learning_rate,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type\n )\n model_spec_metadata = dict(nn_specific = nn_metadata)\n return model_spec_metadata\n\n # 
****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Only removes self.model_dir\n \"\"\"\n if os.path.exists(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n \n# ****************************************************************************************\nclass HybridModelWrapper(ModelWrapper):\n \"\"\"A wrapper for hybrid models, contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n\n model: The PyTorch NN sequential model.\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (np.array of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (np.array of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the training dataset\n\n valid_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the validation dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes HybridModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurizer object): initialized outside of model_wrapper\n\n Side effects:\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transforsamers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n model: dc.models.TorchModel\n \"\"\"\n super().__init__(params, featurizer, ds_client)\n if self.params.layer_sizes is None:\n if self.params.featurizer == 'ecfp':\n self.params.layer_sizes = [1000, 500]\n elif self.params.featurizer in ['descriptors', 'computed_descriptors']:\n self.params.layer_sizes = [200, 100]\n else:\n # Shouldn't happen\n 
self.log.warning(\"You need to define default layer sizes for featurizer %s\" %\n self.params.featurizer)\n self.params.layer_sizes = [1000, 500]\n\n if self.params.dropouts is None:\n self.params.dropouts = [0.4] * len(self.params.layer_sizes)\n\n n_features = self.get_num_features()\n if socket.gethostname()[:3] == \"sur\":\n self.dev = torch.device(\"cpu\")\n else:\n self.dev = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n if self.params.prediction_type == 'regression':\n model_dict = OrderedDict([\n (\"layer1\", torch.nn.Linear(n_features, self.params.layer_sizes[0]).to(self.dev)),\n (\"dp1\", torch.nn.Dropout(p=self.params.dropouts[0]).to(self.dev)),\n (\"relu1\", torch.nn.ReLU().to(self.dev))\n ])\n \n if len(self.params.layer_sizes) > 1:\n for i in range(1, len(self.params.layer_sizes)):\n model_dict[f\"layer{i+1}\"] = torch.nn.Linear(self.params.layer_sizes[i-1], self.params.layer_sizes[i]).to(self.dev)\n model_dict[f\"dp{i+1}\"] = torch.nn.Dropout(p=self.params.dropouts[i]).to(self.dev)\n model_dict[f\"relu{i+1}\"] = torch.nn.ReLU().to(self.dev)\n \n model_dict[\"last_layer\"] = torch.nn.Linear(self.params.layer_sizes[-1], 1).to(self.dev)\n \n self.model_dict = model_dict\n self.model = torch.nn.Sequential(model_dict).to(self.dev)\n else:\n raise Exception(\"Hybrid model only support regression prediction.\")\n\n def _predict_binding(self, activity, conc):\n \"\"\"\n Predict measurements of fractional binding/inhibition of target receptors by a compound with the given activity,\n in -Log scale, at the specified concentration in nM. If the given activity is pKi, a ratio to convert Ki into IC50\n is needed. 
It can be the ratio of concentration and Kd of the radioligand in a competitive binding assay, or the concentration\n of the substrate and Michaelis constant (Km) of enzymatic inhibition assay.\n \"\"\"\n \n if self.params.is_ki:\n if self.params.ki_convert_ratio is None:\n raise Exception(\"Ki converting ratio is missing. Cannot convert Ki into IC50\")\n Ki = 10**(9-activity)\n IC50 = Ki * (1 + self.params.ki_convert_ratio)\n else:\n IC50 = 10**(9-activity)\n pred_frac = 1.0/(1.0 + IC50/conc)\n \n return pred_frac\n\n def _l2_loss(self, yp, yr):\n \"\"\"\n Da's loss function, based on L2 terms for both pKi and percent binding values\n This function is not appropriate for model fitting, but can be used for R^2 calculation.\n \"\"\"\n yreal = yr.to(\"cpu\").numpy()\n pos_ki = np.where(np.isnan(yreal[:,1]))[0]\n pos_bind = np.where(~np.isnan(yreal[:,1]))[0]\n loss_ki = torch.sum((yp[pos_ki, 0] - yr[pos_ki, 0]) ** 2)\n if len(pos_bind[0]) == 0:\n return loss_ki, torch.tensor(0.0, dtype=torch.float32)\n # convert Ki to % binding\n y_stds = self.transformers[0].y_stds\n y_means = self.transformers[0].y_means\n if self.params.is_ki:\n bind_pred = self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])\n else:\n bind_pred = self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])\n # calculate the loss_bind\n loss_bind = torch.sum((bind_pred - yr[pos_bind, 0]) ** 2)\n return loss_ki, loss_bind\n\n def _poisson_hybrid_loss(self, yp, yr):\n \"\"\"\n Hybrid loss function based on L2 losses for deviations of predicted and measured pKi values\n and Poisson losses for predicted vs measured binding values. The idea is to choose loss terms\n that when minimized maximize the likelihood.\n\n Note that we compute both pKi and binding loss terms for compounds that have both kinds of data, since they are\n independent measurements. 
Therefore, pos_ki and pos_bind index sets may overlap.\n \"\"\"\n\n # Get indices of non-missing pKi values\n yreal = yr.to(\"cpu\").numpy()\n pos_ki = np.where(np.isnan(yreal[:,1]))[0]\n # Get indices of non-missing binding values\n pos_bind = np.where(~np.isnan(yreal[:,1]))[0]\n\n # Compute L2 loss for pKi predictions\n loss_ki = torch.sum((yp[pos_ki, 0] - yr[pos_ki, 0]) ** 2)\n #convert the ki prediction back to Ki scale\n y_stds = self.transformers[0].y_stds\n y_means = self.transformers[0].y_means\n # Compute fraction bound to *radioligand* (not drug) from predicted pKi\n if self.params.is_ki:\n rl_bind_pred = 1 - self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])\n else:\n rl_bind_pred = 1 - self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])\n rl_bind_real = 1 - yr[pos_bind, 0]\n # Compute Poisson loss for radioligand binding\n loss_bind = torch.sum(rl_bind_pred - rl_bind_real * torch.log(rl_bind_pred))\n\n if np.isnan(loss_ki.item()):\n raise Exception(\"Ki loss is NaN\")\n if np.isnan(loss_bind.item()):\n raise Exception(\"Binding loss is NaN\")\n return loss_ki, loss_bind\n\n def _loss_batch(self, loss_func, xb, yb, opt=None):\n \"\"\"\n Compute loss_func for the batch xb, yb. 
If opt is provided, perform a training\n step on the model weights.\n \"\"\"\n\n loss_ki, loss_bind = self.loss_func(self.model(xb), yb)\n loss = loss_ki + loss_bind\n \n if opt is not None:\n loss.backward()\n opt.step() \n opt.zero_grad()\n\n return loss_ki.item(), loss_bind.item(), len(xb)\n\n class SubsetData(object):\n \"\"\"\n Container for DataLoader object and attributes of a dataset subset\n \"\"\"\n def __init__(self, ds, dl, n_ki, n_bind):\n self.ds = ds\n self.dl = dl\n self.n_ki = n_ki\n self.n_bind = n_bind\n \n def _tensorize(self, x):\n return torch.tensor(x, dtype=torch.float32)\n\n def _load_hybrid_data(self, data):\n \"\"\"\n Convert the DeepChem dataset into the SubsetData for hybrid model.\n \"\"\"\n self.train_valid_dsets = []\n test_dset = data.test_dset\n num_folds = len(data.train_valid_dsets)\n\n for k in range(num_folds):\n train_dset, valid_dset = data.train_valid_dsets[k]\n # datasets were normalized in previous steps\n x_train, y_train, x_valid, y_valid = map(\n self._tensorize, (train_dset.X, train_dset.y, valid_dset.X, valid_dset.y)\n )\n # train\n train_ki_pos = np.where(np.isnan(y_train[:,1].numpy()))[0]\n train_bind_pos = np.where(~np.isnan(y_train[:,1].numpy()))[0]\n \n # valid\n valid_ki_pos = np.where(np.isnan(y_valid[:,1].numpy()))[0]\n valid_bind_pos = np.where(~np.isnan(y_valid[:,1].numpy()))[0]\n \n train_ds = TensorDataset(x_train, y_train)\n train_dl = DataLoader(train_ds, batch_size=self.params.batch_size, shuffle=True, pin_memory=True)\n train_data = self.SubsetData(train_ds, \n train_dl, \n len(train_ki_pos), \n len(train_bind_pos))\n\n valid_ds = TensorDataset(x_valid, y_valid)\n valid_dl = DataLoader(valid_ds, batch_size=self.params.batch_size * 2, pin_memory=True)\n valid_data = self.SubsetData(valid_ds, \n valid_dl, \n len(valid_ki_pos), \n len(valid_bind_pos))\n\n self.train_valid_dsets.append((train_data, valid_data))\n\n x_test, y_test = map(\n self._tensorize, (test_dset.X, test_dset.y)\n )\n test_ki_pos = 
np.where(np.isnan(y_test[:,1].numpy()))[0]\n test_bind_pos = np.where(~np.isnan(y_test[:,1].numpy()))[0]\n\n test_ds = TensorDataset(x_test, y_test)\n test_dl = DataLoader(test_ds, batch_size=self.params.batch_size * 2, pin_memory=True)\n test_data = self.SubsetData(test_ds, \n test_dl, \n len(test_ki_pos), \n len(test_bind_pos))\n\n self.test_data = test_data\n \n def save_model(self, checkpoint_file, model, opt, epoch, model_dict):\n \"\"\"\n Save a model to a checkpoint file.\n Include epoch, model_dict in checkpoint dict.\n \"\"\"\n checkpoint = dict(\n epoch=epoch,\n model_state_dict=model.state_dict(),\n opt_state_dict=opt.state_dict(),\n model_dict=model_dict\n )\n \n torch.save(checkpoint, checkpoint_file)\n\n def train(self, pipeline):\n self.best_epoch = 0\n self.best_valid_score = None\n self.train_epoch_perfs = np.zeros(self.params.max_epochs)\n self.valid_epoch_perfs = np.zeros(self.params.max_epochs)\n self.test_epoch_perfs = np.zeros(self.params.max_epochs)\n self.train_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.valid_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.test_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.model_choice_scores = np.zeros(self.params.max_epochs)\n self.early_stopping_min_improvement = self.params.early_stopping_min_improvement\n self.early_stopping_patience = self.params.early_stopping_patience\n\n if self.params.loss_func.lower() == \"poisson\":\n self.loss_func = self._poisson_hybrid_loss\n else:\n self.loss_func = self._l2_loss\n\n # load hybrid data\n self._load_hybrid_data(pipeline.data)\n\n checkpoint_file = os.path.join(self.model_dir, f\"{self.params.dataset_name}_model_{self.params.model_uuid}.pt\")\n\n opt = torch.optim.Adam(self.model.parameters(), lr=self.params.learning_rate)\n self.train_perf_data = []\n self.valid_perf_data = []\n self.test_perf_data = []\n for ei in range(self.params.max_epochs):\n self.train_perf_data.append(perf.create_perf_data(\"hybrid\", pipeline.data, 
self.transformers, 'train', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio))\n self.valid_perf_data.append(perf.create_perf_data(\"hybrid\", pipeline.data, self.transformers, 'valid', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio))\n self.test_perf_data.append(perf.create_perf_data(\"hybrid\", pipeline.data, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio))\n\n test_data = self.test_data\n\n time_limit = int(self.params.slurm_time_limit)\n training_start = time.time()\n\n train_dset, valid_dset = pipeline.data.train_valid_dsets[0]\n if len(pipeline.data.train_valid_dsets) > 1:\n raise Exception(\"Currently the hybrid model doesn't support K-fold cross validation splitting.\")\n test_dset = pipeline.data.test_dset\n train_data, valid_data = self.train_valid_dsets[0]\n for ei in range(self.params.max_epochs):\n if llnl_utils.is_lc_system() and (ei > 0):\n # If we're running on an LC system, check that we have enough time to complete another epoch\n # before the current job finishes, by extrapolating from the time elapsed so far.\n\n now = time.time() \n elapsed_time = now - pipeline.start_time\n training_time = now - training_start\n time_remaining = time_limit * 60 - elapsed_time\n time_needed = training_time/ei\n\n if time_needed > 0.9 * time_remaining:\n self.log.warn(\"Projected time to finish one more epoch exceeds time left in job; cutting training to %d epochs\" %\n ei)\n self.params.max_epochs = ei\n break\n\n # Train the model for one epoch. 
We turn off automatic checkpointing, so the last checkpoint\n # saved will be the one we created intentionally when we reached a new best validation score.\n train_loss_ep = 0\n self.model.train()\n for i, (xb, yb) in enumerate(train_data.dl):\n xb = xb.to(self.dev)\n yb = yb.to(self.dev)\n train_loss_ki, train_loss_bind, train_count = self._loss_batch(self.loss_func, xb, yb, opt)\n train_loss_ep += (train_loss_ki + train_loss_bind)\n train_loss_ep /= (train_data.n_ki + train_data.n_bind)\n\n # validation set\n with torch.no_grad():\n valid_loss_ep = 0\n for xb, yb in valid_data.dl:\n xb = xb.to(self.dev)\n yb = yb.to(self.dev)\n valid_loss_ki, valid_loss_bind, valid_count = self._loss_batch(self.loss_func, xb, yb)\n valid_loss_ep += (valid_loss_ki + valid_loss_bind)\n valid_loss_ep /= (valid_data.n_ki + valid_data.n_bind)\n\n train_pred, _ = self.generate_predictions(train_dset)\n valid_pred, _ = self.generate_predictions(valid_dset)\n test_pred, _ = self.generate_predictions(test_dset)\n\n train_perf = self.train_perf_data[ei].accumulate_preds(train_pred, train_dset.ids)\n valid_perf = self.valid_perf_data[ei].accumulate_preds(valid_pred, valid_dset.ids)\n test_perf = self.test_perf_data[ei].accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Epoch %d: training %s = %.3f, training loss = %.3f, validation %s = %.3f, validation loss = %.3f, test %s = %.3f\" % (\n ei, pipeline.metric_type, train_perf, train_loss_ep, pipeline.metric_type, valid_perf, valid_loss_ep,\n pipeline.metric_type, test_perf))\n\n # Compute performance metrics for each subset, and check if we've reached a new best validation set score\n\n self.train_epoch_perfs[ei], _ = self.train_perf_data[ei].compute_perf_metrics()\n self.valid_epoch_perfs[ei], _ = self.valid_perf_data[ei].compute_perf_metrics()\n self.test_epoch_perfs[ei], _ = self.test_perf_data[ei].compute_perf_metrics()\n valid_score = self.valid_perf_data[ei].model_choice_score(self.params.model_choice_score_type)\n 
self.model_choice_scores[ei] = valid_score
            self.num_epochs_trained = ei + 1
            # Checkpoint whenever the validation score improves by more than the
            # early-stopping threshold; stop once no new best epoch has been seen
            # for more than early_stopping_patience epochs.
            if self.best_valid_score is None:
                self.save_model(checkpoint_file, self.model, opt, ei, self.model_dict)
                self.best_valid_score = valid_score
                self.best_epoch = ei
            elif valid_score - self.best_valid_score > self.early_stopping_min_improvement:
                # Save a new checkpoint
                self.save_model(checkpoint_file, self.model, opt, ei, self.model_dict)
                self.best_valid_score = valid_score
                self.best_epoch = ei
            elif ei - self.best_epoch > self.early_stopping_patience:
                self.log.info(f"No improvement after {self.early_stopping_patience} epochs, stopping training")
                break

        # Revert to last checkpoint (the one saved at the best validation score),
        # restoring both model weights and optimizer state.
        checkpoint = torch.load(checkpoint_file)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        opt.load_state_dict(checkpoint['opt_state_dict'])

        # copy the best model checkpoint file
        self._clean_up_excess_files(self.best_model_dir)
        shutil.copy2(checkpoint_file, self.best_model_dir)
        self.log.info(f"Best model from epoch {self.best_epoch} saved to {self.model_dir}")

    # ****************************************************************************************
    def reload_model(self, reload_dir):
        """Loads a saved neural net model from the specified directory.

        Args:
            reload_dir (str): Directory where saved model is located.
            model_dataset (ModelDataset Object): contains the current full dataset

        Side effects:
            Resets the value of model, transformers, and transformers_x
        """
        
        checkpoint_file = os.path.join(reload_dir, f"{self.params.dataset_name}_model_{self.params.model_uuid}.pt")
        if os.path.isfile(checkpoint_file):
            # Rebuild the Sequential net from the saved OrderedDict of layers, then
            # restore the trained weights and switch to inference mode.
            checkpoint = torch.load(checkpoint_file)
            self.best_epoch = checkpoint["epoch"]
            self.model = torch.nn.Sequential(checkpoint["model_dict"]).to(self.dev)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.model.eval()
        else:
            raise Exception(f"Checkpoint file doesn't exist in the reload_dir 
{reload_dir}\")\n \n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n\n\n # ****************************************************************************************\n def get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset. subset may be 'train', 'valid', 'test'\n accordingly. epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. Results are returned as a dictionary of parameter, value pairs.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n dict: A dictionary of parameter/ value pairs of the prediction values and results of the dataset subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n if subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data[epoch])\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data[epoch])\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data[epoch])\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset. 
subset may be 'train', 'valid', 'test' or 'full',\n epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. Results are returned as a PerfData object of the appropriate class \n for the model's split strategy and prediction type.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n PerfData object: Performance object pulled from the appropriate subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n\n if subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n if subset == 'train':\n return self.train_perf_data[epoch]\n elif subset == 'valid':\n return self.valid_perf_data[epoch]\n elif subset == 'test':\n #return self.get_test_perf_data(model_dir, self.data)\n return self.test_perf_data[epoch]\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified dataset with current model, as well as standard deviations\n if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): tuple of predictions for compounds and standard deviation estimates, if requested.\n Each element of tuple is a numpy array of shape (ncmpds, ntasks, nclasses), where nclasses = 1 for regression\n models.\n \"\"\"\n pred, std = None, None\n self.log.info(\"Predicting values for current model\")\n\n x_data, y_data = map(\n self._tensorize, (dataset.X, dataset.y)\n )\n 
has_conc = len(y_data.shape) > 1 and y_data.shape[1] > 1 and np.nan_to_num(y_data[:,1]).max() > 0\n data_ki_pos = np.where(np.isnan(y_data[:,1].numpy()))[0] if has_conc else np.where(y_data[:,0].numpy())[0]\n data_bind_pos = np.where(~np.isnan(y_data[:,1].numpy()))[0] if has_conc else np.array([])\n\n data_ds = TensorDataset(x_data, y_data)\n data_dl = DataLoader(data_ds, batch_size=self.params.batch_size * 2, pin_memory=True)\n data_data = self.SubsetData(data_ds, \n data_dl, \n len(data_ki_pos), \n len(data_bind_pos))\n pred = []\n real = []\n for xb, yb in data_dl:\n xb = xb.to(self.dev)\n yb = yb.to(self.dev)\n yp = self.model(xb)\n for i in range(len(yb)):\n real.append(yb.to(\"cpu\").numpy()[i])\n pred.append(yp.detach().to(\"cpu\").numpy()[i])\n real = np.array(real)\n pred = np.array(pred)\n\n if self.params.transformers and self.transformers is not None:\n if has_conc:\n pred = np.concatenate((pred, real[:, [1]]), axis=1)\n pred = self.transformers[0].untransform(pred, isreal=False)\n pred_bind_pos = np.where(~np.isnan(pred[:, 1]))[0]\n pred[pred_bind_pos, 0] = self._predict_binding(pred[pred_bind_pos, 0], pred[pred_bind_pos, 1])\n else:\n pred = self.transformers[0].untransform(pred, isreal=False)\n else:\n if has_conc:\n pred = np.concatenate((pred, real[:, [1]]), axis=1)\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to hybrid models.\n\n Returns:\n model_spec_metdata (dict): A dictionary of the parameter sets for the HybridModelWrapper object.\n Parameters are saved under the key 'hybrid_specific' as a subdictionary.\n \"\"\"\n nn_metadata = dict(\n best_epoch = self.best_epoch,\n max_epochs = self.params.max_epochs,\n batch_size = self.params.batch_size,\n layer_sizes = self.params.layer_sizes,\n dropouts = self.params.dropouts,\n learning_rate = 
self.params.learning_rate,\n )\n model_spec_metadata = dict(hybrid_specific = nn_metadata)\n return model_spec_metadata\n\n # ****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Does not apply to Hybrid model.\n \"\"\"\n if os.path.exists(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n# ****************************************************************************************\nclass DCRFModelWrapper(ModelWrapper):\n \"\"\"Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurization (Featurization object): The featurization object created outside of model_wrapper\n log (log): The logger\n output_dir (str): The parent path of the model directory\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n model: The dc.models.sklearn_models.SklearnModel as specified by the params attribute\n\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n best_epoch (int): Set to 0, not applicable to deepchem random forest models\n train_perf_data (PerfData): Contains the predictions and performance of the training dataset\n valid_perf_data (PerfData): Contains the predictions and performance of the validation dataset\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n valid_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes DCRFModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurization): Object managing the featurization of compounds\n ds_client: datastore client.\n \"\"\"\n super().__init__(params, featurizer, ds_client)\n self.best_model_dir = os.path.join(self.output_dir, 'best_model')\n self.model_dir = self.best_model_dir\n os.makedirs(self.best_model_dir, exist_ok=True)\n\n if self.params.prediction_type == 'regression':\n rf_model = RandomForestRegressor(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n else:\n rf_model = RandomForestClassifier(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n\n self.model = dc.models.sklearn_models.SklearnModel(rf_model, model_dir=self.best_model_dir)\n\n # ****************************************************************************************\n def train(self, pipeline):\n \"\"\"Trains a random forest model and saves the trained model.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Returns:\n None\n\n Side effects:\n data (ModelDataset): 
contains the dataset, set in pipeline\n\n best_epoch (int): Set to 0, not applicable to deepchem random forest models\n\n train_perf_data (PerfData): Contains the predictions and performance of the training dataset\n\n valid_perf_data (PerfData): Contains the predictions and performance of the validation dataset\n\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n\n valid_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n \"\"\"\n\n self.data = pipeline.data\n self.best_epoch = None\n self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers,'train')\n self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid')\n self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')\n\n self.log.info(\"Fitting random forest model\")\n\n test_dset = pipeline.data.test_dset\n\n num_folds = len(pipeline.data.train_valid_dsets)\n for k in range(num_folds):\n train_dset, valid_dset = pipeline.data.train_valid_dsets[k]\n self.model.fit(train_dset)\n\n train_pred = self.model.predict(train_dset, [])\n train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids)\n\n valid_pred = self.model.predict(valid_dset, [])\n valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids)\n\n test_pred = self.model.predict(test_dset, [])\n test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n\n # Compute mean and SD of performance metrics across validation sets for all folds\n self.train_perf, self.train_perf_std = self.train_perf_data.compute_perf_metrics()\n self.valid_perf, self.valid_perf_std = 
self.valid_perf_data.compute_perf_metrics()\n self.test_perf, self.test_perf_std = self.test_perf_data.compute_perf_metrics()\n\n # Compute score to be used for ranking model hyperparameter sets\n self.model_choice_score = self.valid_perf_data.model_choice_score(self.params.model_choice_score_type)\n\n if num_folds > 1:\n # For k-fold CV, retrain on the combined training and validation sets\n fit_dataset = self.data.combined_training_data()\n self.model.fit(fit_dataset, restore=False)\n self.model_save()\n # The best model is just the single RF training run.\n self.best_epoch = 0\n\n # ****************************************************************************************\n def reload_model(self, reload_dir):\n \"\"\"Loads a saved random forest model from the specified directory. Also loads any transformers that\n were saved with it.\n\n Args:\n reload_dir (str): Directory where saved model is located.\n\n model_dataset (ModelDataset Object): contains the current full dataset\n\n Side effects:\n Resets the value of model, transformers, and transformers_x\n\n \"\"\"\n if self.params.prediction_type == 'regression':\n rf_model = RandomForestRegressor(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n else:\n rf_model = RandomForestClassifier(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n\n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n self.model = dc.models.sklearn_models.SklearnModel(rf_model, model_dir=reload_dir)\n self.model.reload()\n\n # ****************************************************************************************\n def get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset: 'train', 
'valid', 'test' or 'full' accordingly.\n\n epoch_label: ignored; this function always returns the results for the current model.\n\n Returns:\n A dictionary of parameter, value pairs, in the format expected by the\n prediction_results element of the ModelMetrics data.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n \n \"\"\"\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data)\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data)\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data)\n elif subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset (str): may be 'train', 'valid', 'test' or 'full'\n epoch_label (not used in random forest, but kept as part of the method structure)\n\n Results:\n PerfData object: Subclass of perfdata object associated with the appropriate subset's split strategy and prediction type.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n \"\"\"\n if subset == 'train':\n return self.train_perf_data\n elif subset == 'valid':\n return self.valid_perf_data\n elif subset == 'test':\n #return self.get_test_perf_data(self.best_model_dir, self.data)\n return self.test_perf_data\n elif subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified 
dataset, as well as uncertainty values if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): numpy arrays containing predictions for compounds and the standard error estimates.\n\n \"\"\"\n pred, std = None, None\n self.log.info(\"Evaluating current model\")\n\n pred = self.model.predict(dataset, self.transformers)\n ncmpds = pred.shape[0]\n pred = pred.reshape((ncmpds,1,-1))\n\n if self.params.uncertainty:\n if self.params.prediction_type == 'regression':\n rf_model = joblib.load(os.path.join(self.best_model_dir, 'model.joblib'))\n ## s.d. from forest\n if self.params.transformers and self.transformers is not None:\n RF_per_tree_pred = [dc.trans.undo_transforms(\n tree.predict(dataset.X), self.transformers) for tree in rf_model.estimators_]\n else:\n RF_per_tree_pred = [tree.predict(dataset.X) for tree in rf_model.estimators_]\n\n # Don't need to \"untransform\" standard deviations here, since they're calculated from\n # the untransformed per-tree predictions.\n std = np.array([np.std(col) for col in zip(*RF_per_tree_pred)]).reshape((ncmpds,1,-1))\n else:\n # We can estimate uncertainty for binary classifiers, but not multiclass (yet)\n nclasses = pred.shape[2]\n if nclasses == 2:\n ntrees = self.params.rf_estimators\n # Use normal approximation to binomial sampling error. 
Later we can do Jeffrey's interval if we\n # want to get fancy.\n std = np.sqrt(pred * (1-pred) / ntrees)\n else:\n self.log.warning(\"Warning: Random forest only supports uncertainties for binary classifiers.\")\n\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to random forest models.\n\n Returns:\n model_spec_metadata (dict): Returns random forest specific metadata as a subdict under the key 'rf_specific'\n\n \"\"\"\n rf_metadata = {\n 'rf_estimators': self.params.rf_estimators,\n 'rf_max_features': self.params.rf_max_features,\n 'rf_max_depth': self.params.rf_max_depth\n }\n model_spec_metadata = dict(rf_specific = rf_metadata)\n return model_spec_metadata\n \n # ****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Does not apply to Random Forest.\n \"\"\"\n return\n\n# ****************************************************************************************\nclass DCxgboostModelWrapper(ModelWrapper):\n \"\"\"Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurization (Featurization object): The featurization object created outside of model_wrapper\n log (log): The logger\n output_dir (str): The parent path of the model directory\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n transformers_x (list): Initialized as an empty list, stores the 
transformers on the featurizers\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n best_model_dir (str): The subdirectory under output_dir that contains the best model. Created in setup_model_dirs\n model: The dc.models.sklearn_models.SklearnModel as specified by the params attribute\n\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n best_epoch (int): Set to 0, not applicable\n train_perf_data (PerfObjects): Contains the predictions and performance of the training dataset\n valid_perf_data (PerfObjects): Contains the predictions and performance of the validation dataset\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n valid_perfs (dict): A dictionary of predicted values and metrics on the validation dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes RunModel object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurization): Object managing the featurization of compounds\n ds_client: datastore client.\n \"\"\"\n super().__init__(params, featurizer, ds_client)\n self.best_model_dir = os.path.join(self.output_dir, 'best_model')\n self.model_dir = self.best_model_dir\n os.makedirs(self.best_model_dir, exist_ok=True)\n\n if self.params.prediction_type == 'regression':\n xgb_model = xgb.XGBRegressor(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='reg:squarederror',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n missing=None,\n importance_type='gain',\n 
n_jobs=-1,\n gpu_id = 0,\n n_gpus = -1,\n max_bin = 16,\n# tree_method = 'gpu_hist',\n seed=0\n )\n else:\n xgb_model = xgb.XGBClassifier(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='binary:logistic',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n importance_type='gain',\n missing=None,\n gpu_id = 0,\n n_jobs=-1, \n n_gpus = -1,\n max_bin = 16,\n# tree_method = 'gpu_hist',\n seed=0\n )\n self.model = dc.models.GBDTModel(xgb_model, model_dir=self.best_model_dir)\n\n # ****************************************************************************************\n def train(self, pipeline):\n \"\"\"Trains a xgboost model and saves the trained model.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Returns:\n None\n\n Side effects:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Set to 0, not applicable to deepchem xgboost models\n\n train_perf_data (PerfData): Contains the predictions and performance of the training dataset\n\n valid_perf_data (PerfData): Contains the predictions and performance of the validation dataset\n\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n\n valid_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n \"\"\"\n\n self.data = pipeline.data\n self.best_epoch = None\n self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers,'train')\n self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid')\n 
self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')\n\n self.log.info(\"Fitting xgboost model\")\n\n test_dset = pipeline.data.test_dset\n\n num_folds = len(pipeline.data.train_valid_dsets)\n for k in range(num_folds):\n train_dset, valid_dset = pipeline.data.train_valid_dsets[k]\n self.model.fit(train_dset)\n\n train_pred = self.model.predict(train_dset, [])\n train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids)\n\n valid_pred = self.model.predict(valid_dset, [])\n valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids)\n\n test_pred = self.model.predict(test_dset, [])\n test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n # Compute mean and SD of performance metrics across validation sets for all folds\n self.train_perf, self.train_perf_std = self.train_perf_data.compute_perf_metrics()\n self.valid_perf, self.valid_perf_std = self.valid_perf_data.compute_perf_metrics()\n self.test_perf, self.test_perf_std = self.test_perf_data.compute_perf_metrics()\n\n # Compute score to be used for ranking model hyperparameter sets\n self.model_choice_score = self.valid_perf_data.model_choice_score(self.params.model_choice_score_type)\n\n if num_folds > 1:\n # For k-fold CV, retrain on the combined training and validation sets\n fit_dataset = self.data.combined_training_data()\n self.model.fit(fit_dataset, restore=False)\n self.model_save()\n # The best model is just the single xgb training run.\n self.best_epoch = 0\n\n # ****************************************************************************************\n def reload_model(self, reload_dir):\n\n \"\"\"Loads a saved xgboost model from the specified directory. 
Also loads any transformers that\n were saved with it.\n\n Args:\n reload_dir (str): Directory where saved model is located.\n\n model_dataset (ModelDataset Object): contains the current full dataset\n\n Side effects:\n Resets the value of model, transformers, and transformers_x\n\n \"\"\"\n\n if self.params.prediction_type == 'regression':\n xgb_model = xgb.XGBRegressor(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='reg:squarederror',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n missing=None,\n importance_type='gain',\n n_jobs=-1,\n gpu_id = 0,\n n_gpus = -1,\n max_bin = 16,\n seed=0\n# tree_method = 'gpu_hist'\n )\n else:\n xgb_model = xgb.XGBClassifier(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='binary:logistic',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n importance_type='gain',\n missing=None,\n gpu_id = 0,\n n_jobs=-1, \n n_gpus = -1,\n max_bin = 16,\n seed=0\n# tree_method = 'gpu_hist',\n )\n\n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n\n self.model = dc.models.GBDTModel(xgb_model, model_dir=self.best_model_dir)\n self.model.reload()\n\n # ****************************************************************************************\n def 
get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset: 'train', 'valid', 'test' or 'full' accordingly.\n\n epoch_label: ignored; this function always returns the results for the current model.\n\n Returns:\n A dictionary of parameter, value pairs, in the format expected by the\n prediction_results element of the ModelMetrics data.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n\n \"\"\"\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data)\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data)\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data)\n elif subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset (str): may be 'train', 'valid', 'test' or 'full'\n\n epoch_label (not used in random forest, but kept as part of the method structure)\n\n Results:\n PerfData object: Subclass of perfdata object associated with the appropriate subset's split strategy and prediction type.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n \"\"\"\n\n if subset == 'train':\n return self.train_perf_data\n elif subset == 'valid':\n return self.valid_perf_data\n elif subset == 'test':\n #return self.get_test_perf_data(self.best_model_dir, self.data)\n return self.test_perf_data\n elif subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n else:\n raise ValueError(\"Unknown dataset 
subset '%s'\" % subset)\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified dataset, as well as uncertainty values if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): numpy arrays containing predictions for compounds and the standard error estimates.\n\n \"\"\"\n pred, std = None, None\n self.log.warning(\"Evaluating current model\")\n\n pred = self.model.predict(dataset, self.transformers)\n ncmpds = pred.shape[0]\n pred = pred.reshape((ncmpds, 1, -1))\n self.log.warning(\"uncertainty not supported by xgboost models\")\n\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to xgboost models.\n\n Returns:\n model_spec_metadata (dict): Returns xgboost specific metadata as a subdict under the key 'xgb_specific'\n\n \"\"\"\n xgb_metadata = {\"xgb_max_depth\" : self.params.xgb_max_depth,\n \"xgb_learning_rate\" : self.params.xgb_learning_rate,\n \"xgb_n_estimators\" : self.params.xgb_n_estimators,\n \"xgb_gamma\" : self.params.xgb_gamma,\n \"xgb_min_child_weight\" : self.params.xgb_min_child_weight,\n \"xgb_subsample\" : self.params.xgb_subsample,\n \"xgb_colsample_bytree\" :self.params.xgb_colsample_bytree\n }\n model_spec_metadata = dict(xgb_specific=xgb_metadata)\n return model_spec_metadata\n\n # ****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Does not apply to xgboost\n \"\"\"\n return\n" ]
[ [ "torch.utils.data.DataLoader", "torch.no_grad", "sklearn.ensemble.RandomForestRegressor", "torch.cuda.is_available", "torch.log", "tensorflow.executing_eagerly", "numpy.nan_to_num", "torch.nn.Dropout", "torch.save", "tensorflow.Graph", "sklearn.ensemble.RandomForestClassifier", "numpy.isnan", "torch.device", "numpy.sqrt", "tensorflow.compat.v1.Session", "torch.load", "numpy.zeros", "torch.tensor", "numpy.std", "torch.utils.data.TensorDataset", "torch.sum", "torch.nn.Linear", "tensorflow.train.get_checkpoint_state", "tensorflow.train.latest_checkpoint", "torch.nn.Sequential", "numpy.array", "numpy.concatenate", "torch.nn.ReLU" ] ]
tblaschke/reinvent-multi-target
[ "a555431da0de038b7e643c508a7d2501e83c087f" ]
[ "reinvent/unittest_reinvent/scoring_tests/physchem/test_hbd_lipinski.py" ]
[ "import unittest\n\nimport numpy as np\nimport numpy.testing as npt\n\nfrom scoring.component_parameters import ComponentParameters\nfrom scoring.function import CustomSum\nfrom utils.enums.component_specific_parameters_enum import ComponentSpecificParametersEnum\nfrom utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum\nfrom utils.enums.transformation_type_enum import TransformationTypeEnum\n\n\nclass Test_tpsa_score_no_transformation(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n sf_enum = ScoringFunctionComponentNameEnum()\n csp_enum = ComponentSpecificParametersEnum()\n ts_parameters = ComponentParameters(component_type=sf_enum.NUM_HBD_LIPINSKI,\n name=\"NumHBD_Lipinski\",\n weight=1.,\n smiles=[],\n model_path=\"\",\n specific_parameters={\n csp_enum.TRANSFORMATION: False\n })\n self.sf_state = CustomSum(parameters=[ts_parameters])\n\n def test_hbd_1(self):\n smiles = [\n \"C(=O)N\",\n 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N'\n ]\n values = np.array([1., 1.])\n score = self.sf_state.get_final_score(smiles=smiles)\n npt.assert_array_equal(score.total_score, values)\n\n\nclass Test_tpsa_score_with_double_sigmoid(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n sf_enum = ScoringFunctionComponentNameEnum()\n csp_enum = ComponentSpecificParametersEnum()\n tt_enum = TransformationTypeEnum()\n specific_parameters = {\n csp_enum.TRANSFORMATION: True,\n csp_enum.LOW: 0,\n csp_enum.HIGH: 1,\n csp_enum.TRANSFORMATION_TYPE: tt_enum.STEP\n }\n ts_parameters = ComponentParameters(component_type=sf_enum.NUM_HBD_LIPINSKI,\n name=\"NumHBD_Lipinski\",\n weight=1.,\n smiles=[],\n model_path=\"\",\n specific_parameters=specific_parameters\n )\n self.sf_state = CustomSum(parameters=[ts_parameters])\n\n def test_hbd_1(self):\n smiles = [\n \"C(=O)N\",\n 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N'\n ]\n values = np.array([1.0, 1.0])\n score = self.sf_state.get_final_score(smiles=smiles)\n 
npt.assert_array_equal(score.total_score, values)\n\n\n\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal" ] ]
yongleex/SBCC
[ "40f8e67e446fc14fc82ea87f82ee841d62520c71" ]
[ "utils/outlier.py" ]
[ "import numpy as np\nimport cv2\n\n\ndef NMT(u,v, eps=0.2, thr=5.0, smooth_flag=True):\n \"\"\" \n Normalised Median Test, from 'Universal outlier detection for PIV data'\n \"\"\"\n u, v = np.float32(u), np.float32(v)\n criterion = 0\n \n for c in [u,v]:\n c_median = cv2.medianBlur(c, 5)\n residual = np.abs(c - c_median)\n r_median = cv2.medianBlur(residual, 5)\n cri = residual/(r_median + eps)\n criterion += np.power(cri, 2)\n\n criterion = np.sqrt(criterion)\n index = criterion > thr\n\n u_out, v_out = u, v\n u_out[index] = cv2.medianBlur(u, 5)[index]\n v_out[index] = cv2.medianBlur(v, 5)[index]\n \n if smooth_flag:\n u_out = cv2.GaussianBlur(u_out, (3,3),0)\n v_out = cv2.GaussianBlur(v_out, (3,3),0)\n return u_out, v_out, index\n\n\n\n" ]
[ [ "numpy.sqrt", "numpy.power", "numpy.float32", "numpy.abs" ] ]
eglrp/ConvPoint_Keras
[ "66c94479ff8dc8ad174ed4da8e6bb1d641a8a8c0" ]
[ "ConvPoint.py" ]
[ "from dataTool import ReadLabels, ReadXYZ, VisualizePointCloudClassesAsync, modelPath, DataTool\nfrom imports import *\nimport math\nimport numpy as np\nfrom time import time \n\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import Sequence\nfrom tensorflow.keras.layers import Input, BatchNormalization, Dense, Dropout, InputLayer\n\nfrom sklearn.neighbors import KDTree\nfrom sklearn.metrics import confusion_matrix\nfrom PIL import Image, ImageEnhance, ImageOps\n\nimport random\n# from notify_run import Notify\n\nclass Const:\n @staticmethod\n def IsWindowsMachine():\n if os.path.isdir(\"C:/Program Files\"):\n return True\n else: \n return False\n \n if os.path.isdir(\"C:/Program Files\"):\n batchSize = 8\n else:\n batchSize = 16 #25\n\n #Placeholders\n classCount = Label.Semantic3D.Count-1\n classNames = Label.Semantic3D.Names\n\n testFiles = []\n excludeFiles = []\n Paths = Paths.Semantic3D\n \n epochs = 100\n pointComponents = 3\n featureComponents = 3 #rgb \n classCount = 0\n npoints = 8192\n blocksize = 8\n test_step = 0.5\n name = \"\"\n\n #Algorithm configuration\n noFeature = False\n Fusion = False\n Scale = False\n Rotate = False\n Mirror = False\n Jitter = False\n FtrAugment = False\n\n logsPath = \"./logs\"\n ### MODEL CONFIG\n pl = 64\n ### MODEL CONFIG\n\n def BuildSpecDict(self):\n return {\"noFeature\" : self.noFeature,\n \"Fusion\" : self.Fusion,\n \"Scale\" : self.Scale,\n \"Rotate\" : self.Rotate,\n \"Mirror\" : self.Mirror,\n \"Jitter\" : self.Jitter,\n \"FtrAugment\" : False if self.noFeature else self.FtrAugment,\n }\n\n def Name(self, UID = \"\"):\n modelName = self.name\n \n modelName += f\"({len(self.TrainFiles())}&{len(self.TestFiles())})\"\n\n for spec, value in self.BuildSpecDict().items():\n if(value == True):\n modelName += f\"({spec})\"\n\n if(UID != \"\"):\n modelName += f\"_{UID}\"\n\n return modelName\n \n @staticmethod\n def RemoveUID(name : str):\n return 
name.replace(f\"_{Const.ParseModelUID(name)}\", \"\")\n \n @staticmethod\n def UID():\n import uuid\n return uuid.uuid4().hex\n \n @staticmethod\n def ParseModelConfig(file):\n config = Paths.FileName(file).split(\"_\")[0].replace(\"(\",\" \").replace(\")\",\"\").replace(\"vox \",\"\").split(\" \")\n\n const = None\n if(config[0] == NPM3D.name):\n const = NPM3D() \n if(config[0] == Semantic3D.name):\n const = Semantic3D()\n \n for conf in config[1:]:\n if conf == \"noFeature\" or conf == \"NOCOL\":\n const.noFeature = True\n elif conf == \"Fusion\":\n const.Fusion = True\n elif conf == \"Scale\":\n const.Scale = True\n elif conf == \"Rotate\":\n const.Rotate = True\n elif conf == \"Mirror\":\n const.Mirror = True\n elif conf == \"Jitter\":\n const.Jitter = True\n elif conf == \"FtrAugment\":\n const.FtrAugment = True\n \n return const\n \n @staticmethod\n def ParseModelUID(file):\n parts = Paths.FileName(file).split(\"_\")\n\n if(len(parts) >= 2):\n return parts[1]\n else:\n return None\n\n @staticmethod\n def ParseModelName(file, withUID = True):\n parts = Paths.FileName(file, withoutExt = False).split(\"_\")\n\n name = parts[0]\n if(withUID and len(parts) > 1):\n name += \"_\"+parts[1]\n\n return name\n\n def TestFiles(self): \n return Paths.JoinPaths(self.Paths.processedTrain, self.testFiles)\n\n def TrainFiles(self):\n return Paths.GetFiles(self.Paths.processedTrain, excludeFiles = self.TestFiles()+self.excludeFiles)\n\nclass Semantic3D(Const): \n pointComponents = 3\n featureComponents = 3 #rgb\n classCount = Label.Semantic3D.Count-1\n classNames = Label.Semantic3D.Names\n test_step = 0.8\n name = \"Sem3D\"\n Paths = Paths.Semantic3D\n\n testFiles = [\n \"untermaederbrunnen_station3_xyz_intensity_rgb_voxels.npy\",\n \"domfountain_station1_xyz_intensity_rgb_voxels.npy\",\n ]\n\n excludeFiles = []\n\n fileNames = {\"birdfountain_station1_xyz_intensity_rgb\" : \"birdfountain1\",\n \"castleblatten_station1_intensity_rgb\" : \"castleblatten1\",\n 
\"castleblatten_station5_xyz_intensity_rgb\" : \"castleblatten5\",\n \"marketplacefeldkirch_station1_intensity_rgb\" : \"marketsquarefeldkirch1\",\n \"marketplacefeldkirch_station4_intensity_rgb\" : \"marketsquarefeldkirch4\",\n \"marketplacefeldkirch_station7_intensity_rgb\" : \"marketsquarefeldkirch7\",\n \"sg27_station3_intensity_rgb\" : \"sg27_3\",\n \"sg27_station6_intensity_rgb\" : \"sg27_6\",\n \"sg27_station8_intensity_rgb\" : \"sg27_8\",\n \"sg27_station10_intensity_rgb\" : \"sg27_10\",\n \"sg28_station2_intensity_rgb\" : \"sg28_2\",\n \"sg28_station5_xyz_intensity_rgb\" : \"sg28_5\",\n \"stgallencathedral_station1_intensity_rgb\" : \"stgallencathedral1\",\n \"stgallencathedral_station3_intensity_rgb\" : \"stgallencathedral3\",\n \"stgallencathedral_station6_intensity_rgb\" : \"stgallencathedral6\",\n\n \"MarketplaceFeldkirch_Station4_rgb_intensity-reduced\" : \"marketsquarefeldkirch4-reduced\",\n \"sg27_station10_rgb_intensity-reduced\" : \"sg27_10-reduced\",\n \"sg28_Station2_rgb_intensity-reduced\" : \"sg28_2-reduced\",\n \"StGallenCathedral_station6_rgb_intensity-reduced\" : \"stgallencathedral6-reduced\",\n }\n\nclass Curbs(Const): \n pointComponents = 3\n featureComponents = 3\n classCount = 2\n classNames = Label.Curbs.Names\n test_step = 0.5\n name = \"Curbs\"\n Paths = Paths.Curbs\n\n if os.path.isdir(\"C:/Program Files\"):\n batchSize = 8\n else:\n batchSize = 25\n\n testFiles = [\n \"park_extracted.npy\",\n \"Jelskio_str_trimmed.npy\",\n ]\n \n excludeFiles = [\n \"powerlines_dataset\"\n ]\n\n def FilterCurbAndLineFiles(self, files):\n return [file for file in files if not file.endswith(\"_curbs.npy\") and not file.endswith(\"_lines.npy\")]\n\n def TestFiles(self): \n return self.FilterCurbAndLineFiles(super(Curbs, self).TestFiles())\n\n def TrainFiles(self):\n return self.FilterCurbAndLineFiles(super(Curbs, self).TrainFiles())\n\nclass NPM3D(Const):\n pointComponents = 3\n featureComponents = 1\n classCount = Label.NPM3D.Count-1\n classNames = 
Label.NPM3D.Names\n test_step = 0.5\n name = \"NPM3D\"\n Paths = Paths.NPM3D \n\n testFiles = [\n # \"Lille1_1_0.npy\",\n # \"Lille1_1_1.npy\",\n # \"Lille1_1_2.npy\",\n # \"Lille1_1_3.npy\",\n # \"Lille1_1_4.npy\",\n # \"Lille1_1_5.npy\",\n # \"Lille1_1_6.npy\",\n # \"Lille1_1_7.npy\",\n # \"Lille1_1_8.npy\",\n\n # \"Lille1_2_0.npy\",\n # \"Lille1_2_1.npy\",\n \n \"Lille2_0.npy\",\n \"Lille2_1.npy\",\n \"Lille2_2.npy\", \n \"Lille2_8.npy\", \n \"Lille2_9.npy\", \n\n # \"Paris_0.npy\",\n # \"Paris_1.npy\",\n ]\n \n excludeFiles = [\n # \"Lille1_1_7.npy\",\n # \"Lille1_2_2.npy\",\n \"Lille2_10.npy\",\n # \"Paris_2.npy\",\n ]\n\nclass WeightsMul(tf.keras.layers.Layer):\n def __init__(self, shape, lowBound, highBound, **kwargs):\n super(WeightsMul, self).__init__(**kwargs)\n self.shape = shape\n self.lowBound = lowBound\n self.highBound = highBound\n\n def build(self, input_shape):\n init = tf.random_uniform_initializer(self.lowBound, self.highBound)\n self.vars = self.add_weight(shape=(self.shape), \n initializer = init, \n trainable = True, dtype=tf.float32)\n\n def call(self, inputs): \n return tf.matmul(inputs, self.vars)\n \n def get_config(self):\n config = super(WeightsMul, self).get_config()\n config.update({'shape': self.shape, 'lowBound': self.lowBound, 'highBound': self.highBound})\n return config\n\nclass GatherNDLayer(tf.keras.layers.Layer):\n def __init__(self, **kwargs): \n super(GatherNDLayer, self).__init__(**kwargs)\n \n def call(self, array, indices):\n return tf.gather_nd(array, indices, batch_dims=1)\n \n def get_config(self):\n config = super(GatherNDLayer, self).get_config()\n return config\n\nclass SubstractCenters(tf.keras.layers.Layer):\n def __init__(self, dim, n_centers, **kwargs):\n super(SubstractCenters, self).__init__(**kwargs)\n self.dim = dim\n self.n_centers = n_centers\n \n def build(self, input_shape):\n center_data = np.zeros((self.dim, self.n_centers))\n for i in range(self.n_centers):\n coord = np.random.rand(self.dim)*2 - 1\n 
while (coord**2).sum() > 1:\n coord = np.random.rand(self.dim)*2 - 1\n center_data[:,i] = coord\n\n self.centers = self.add_weight(shape = (center_data.shape), \n initializer = tf.constant_initializer(center_data), \n trainable = True, dtype=tf.float32)\n\n def call(self, points): \n return points - self.centers\n \n def get_config(self):\n config = super(SubstractCenters, self).get_config()\n config.update({'dim': self.dim, 'n_centers': self.n_centers})\n return config\n\nclass UnitBallNormalize(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(UnitBallNormalize, self).__init__(**kwargs)\n\n def call(self, points):\n maxi = tf.sqrt(tf.reduce_max(tf.reduce_sum(tf.square(tf.stop_gradient(points)), axis = 3), axis = 2))\n maxi = tf.where(tf.equal(maxi, 0.0), tf.constant(1.0), maxi)\n points = points / tf.expand_dims(tf.expand_dims(maxi, 2), 3)\n return points\n \n def get_config(self):\n config = super(UnitBallNormalize, self).get_config()\n return config\n\ndef PtConv(fts, points, K, next_pts, in_features, out_features, n_centers = 16):\n next_pts_ = None\n if isinstance(next_pts, int) and points.get_shape()[1] != next_pts:\n # convolution with reduction\n indices, next_pts_ = KDTreeSampleLayer(K, next_pts)(points)\n elif (next_pts is None) or (isinstance(next_pts, int) and points.get_shape()[1] == next_pts):\n # convolution without reduction\n indices = KDTreeLayer(K)(points, points)\n next_pts_ = points\n else:\n # convolution with up sampling or projection on given points\n indices = KDTreeLayer(K)(points, next_pts)\n next_pts_ = next_pts\n \n if next_pts is None or isinstance(next_pts, int):\n next_pts = next_pts_\n\n # get the features and point cooridnates associated with the indices\n pts = GatherNDLayer()(points, indices)\n if fts is None:\n features = tf.expand_dims(tf.ones_like(pts[:,:,:,0]), 3)\n else:\n features = GatherNDLayer()(fts, indices) \n\n # center the neighborhoods\n pts = pts - tf.expand_dims(next_pts,2)\n\n # normalize to unit 
ball, or not\n pts = UnitBallNormalize()(pts)\n\n # compute the distances\n dists = SubstractCenters(3, n_centers)(tf.expand_dims(pts, 4))\n\n dShape = dists.shape\n dists = tf.reshape(dists, (-1, dShape[1], dShape[2], dShape[3]*dShape[4]))\n\n dists = DenseInitialized(2*n_centers, activation=\"relu\")(dists)\n dists = DenseInitialized(n_centers, activation=\"relu\")(dists)\n dists = DenseInitialized(n_centers, activation=\"relu\")(dists)\n \n # compute features \n fs = features.shape # [batch, points, n_centers, in_features]\n ds = dists.shape\n\n features = tf.transpose(features,[0, 1, 3, 2])\n features = tf.reshape(features, (-1, features.shape[2], features.shape[3])) #features.shape[0]*features.shape[1]\n dists = tf.reshape(dists, (-1, dists.shape[2], dists.shape[3])) #dists.shape[0]*dists.shape[1]\n\n features = tf.matmul(features, dists)\n features = tf.reshape(features, (-1, ds[1], features.shape[1]*features.shape[2]))\n\n bound = math.sqrt(3.0) * math.sqrt(2.0 / (in_features + out_features))\n features = WeightsMul([in_features * n_centers, out_features], -bound, bound)(features)\n\n features = features / fs[2]\n\n # normalization and activation\n features = BatchNormalization(epsilon = 1e-05, momentum=0.9)(features) \n\n features = tf.nn.relu(features)\n\n return features, next_pts\n\ndef LinearInitializer(k):\n k = np.sqrt(1.0/float(k))\n return tf.random_uniform_initializer(k*-1, k)\n\ndef DenseInitialized(out_features, activation = None, name = None):\n def DenseInit(x):\n return Dense(out_features, \n kernel_initializer = tf.initializers.lecun_normal(),\n bias_initializer = tf.initializers.lecun_normal(),\n activation = activation,\n name = name,\n )(x)\n\n return DenseInit\n\ndef CreateModel(classCount, ftsComp, in_fts = None, in_pts = None, returnFeatures = False, noColor = False, applySoftmax = True):\n print(\"Creating new model...\")\n \n if(in_fts is None and in_pts is None):\n in_pts = Input(shape=(Const.npoints, Const.pointComponents), 
dtype=tf.float32) #points \n\n if(noColor):\n in_fts = None\n else:\n in_fts = Input(shape=(Const.npoints, ftsComp), dtype=tf.float32) #featuress \n \n if(noColor):\n in_fts = None\n\n pl = Const.pl\n ### Down Sample\n x0, _ = PtConv(in_fts, in_pts, K = 16, next_pts = None, in_features = ftsComp, out_features = pl)\n x1, pts1 = PtConv(x0, in_pts, K = 16, next_pts = 2048, in_features = pl, out_features = pl)\n x2, pts2 = PtConv(x1, pts1, K = 16, next_pts = 1024, in_features = pl, out_features = pl)\n x3, pts3 = PtConv(x2, pts2, K = 16, next_pts = 256, in_features = pl, out_features = pl)\n x4, pts4 = PtConv(x3, pts3, K = 8, next_pts = 64, in_features = pl, out_features = pl*2)\n x5, pts5 = PtConv(x4, pts4, K = 8, next_pts = 16, in_features = pl*2, out_features = pl*2)\n x6, pts6 = PtConv(x5, pts5, K = 4, next_pts = 8, in_features = pl*2, out_features = pl*2)\n\n ## Up Sample\n x5d, _ = PtConv(x6, pts6, K = 4, next_pts = pts5, in_features = pl*2, out_features = pl*2)\n x5d = tf.concat([x5d, x5], axis = 2)\n\n x4d, _ = PtConv(x5d, pts5, K = 4, next_pts = pts4, in_features = pl*4, out_features = pl*2)\n x4d = tf.concat([x4d, x4], axis = 2)\n\n x3d, _ = PtConv(x4d, pts4, K = 4, next_pts = pts3, in_features = pl*4, out_features = pl)\n x3d = tf.concat([x3d, x3], axis = 2)\n\n x2d, _ = PtConv(x3d, pts3, K = 8, next_pts = pts2, in_features = pl*2, out_features = pl)\n x2d = tf.concat([x2d, x2], axis = 2)\n\n x1d, _ = PtConv(x2d, pts2, K = 8, next_pts = pts1, in_features = pl*2, out_features = pl)\n x1d = tf.concat([x1d, x1], axis = 2)\n\n x0d, _ = PtConv(x1d, pts1, K = 8, next_pts = in_pts, in_features = pl*2, out_features = pl)\n x0d = tf.concat([x0d, x0], axis = 2)\n \n ### Output layer\n out_labels = Dropout(rate=0.5)(x0d)\n \n out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]))\n \n out_labels = DenseInitialized(classCount)(out_labels)\n\n out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))\n\n if(applySoftmax):\n out_labels = 
tf.nn.softmax(out_labels)\n\n if(noColor):\n inputList = [in_pts]\n else:\n inputList = [in_fts, in_pts]\n\n if(returnFeatures):\n return Model(inputList, [x0d, out_labels], name =\"model\")\n \n model = Model(inputList, out_labels, name =\"model\")\n model = CompileModel(model, classCount) \n # print(model.summary())\n return model\n\ndef ModifyModelOutput(model, classCount):\n dropoutLayer = model.layers[len(model.layers)-5] #take output of the drop out layer\n out_labels = dropoutLayer.output\n\n out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]), name = \"lbl_reshape_1\")\n out_labels = DenseInitialized(classCount, name = \"lbl_dense\")(out_labels) \n out_labels = tf.reshape(out_labels, (-1, dropoutLayer.input.shape[1], out_labels.shape[1]), name = \"lbl_reshape_2\")\n out_labels = tf.nn.softmax(out_labels, name = \"lbl_softmax\")\n\n return Model(model.inputs, out_labels, name =\"model\")\n\ndef ReadModel(modelPath):\n if(not modelPath.endswith(\".h5\")):\n modelPath += \".h5\"\n\n if(not os.path.exists(modelPath)):\n if(os.path.exists(os.path.join(\".\" , \"data\", modelPath))):\n modelPath = os.path.join(\".\" , \"data\", modelPath)\n elif(os.path.exists(os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath, False)))):\n file = os.path.basename(modelPath)\n folder = os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath, False))\n modelPath = os.path.join(folder, file)\n elif(os.path.exists(os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath)))):\n file = os.path.basename(modelPath)\n folder = os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath))\n modelPath = os.path.join(folder, file)\n\n if(not os.path.exists(modelPath)):\n raise FileNotFoundError \n\n model = tf.keras.models.load_model(modelPath, compile=False,\n custom_objects={'NearestNeighborsLayer': NearestNeighborsLayer, \n 'SampleNearestNeighborsLayer': SampleNearestNeighborsLayer,\n 'SubstractCenters': SubstractCenters,\n 'WeightsMul': WeightsMul,\n 
'GatherNDLayer':GatherNDLayer,\n 'UnitBallNormalize':UnitBallNormalize,\n 'KDTreeSampleLayer':KDTreeSampleLayer,\n 'KDTreeLayer':KDTreeLayer,\n })\n\n PrintToLog(\"{} model loaded\".format(modelPath))\n return model\n\ndef LatestModel(path):\n if(Const.ParseModelUID(path) is None):\n folders = [os.path.join(\".\" , \"data\",folder) for folder in os.listdir(os.path.join(\".\" , \"data\")) \n if os.path.isdir(os.path.join(\".\" , \"data\",folder)) \n and path == Const.RemoveUID(Const.ParseModelName(folder))\n and len(Paths.GetFiles(os.path.join(\".\" , \"data\",folder), findExtesions=\".h5\")) > 0]\n path = max(folders, key=os.path.getctime)\n else:\n path = os.path.join(\".\" , \"data\", Const.ParseModelName(path)) \n\n try:\n latestModel = max(Paths.GetFiles(path, findExtesions=\".h5\"), key=os.path.getctime)\n except:\n print(f\"No model found in: {path}\")\n latestModel = None\n\n return latestModel\n\nimport re\ndef ModelValMIOU(path):\n result = re.findall(\"val\\((.+)\\)\", path)\n return float(result[0])\n\ndef HighestValMIOUModel(path):\n if(not os.path.isdir(path)):\n path = os.path.join(\".\" , \"data\", os.path.basename(path).split(\"_\")[0])\n\n latestModel = max(Paths.GetFiles(path, findExtesions=\".h5\"), key=ModelValMIOU)\n return latestModel\n\ndef LoadModel(modelPath, consts):\n model = ReadModel(modelPath)\n\n modified = False\n if(model.output.shape[2] != consts.classCount):\n print(\"Model output {} classes changed to {}\".format(model.output.shape[2], consts.classCount))\n modified = True\n model = ModifyModelOutput(model, consts.classCount)\n\n model = CompileModel(model, consts.classCount)\n # model.summary()\n return model, modified\n\ndef ReadModelConfig(path):\n Model = ReadModel(path)\n modelConfig = Const.ParseModelConfig(path)\n return Model, modelConfig\n\ndef CreateModelCopy(Model, modelConfig, in_pts, in_RGB):\n inputFeatures = 1 if modelConfig.noFeature else modelConfig.featureComponents\n newModel = 
CreateModel(modelConfig.classCount, inputFeatures, in_RGB, in_pts, noColor=modelConfig.noFeature, returnFeatures=True, applySoftmax=False)\n\n if(Model != None):\n for new_layer, layer in zip(newModel.layers, Model.layers):\n new_layer.set_weights(layer.get_weights())\n\n return newModel\n\ndef FuseModels(modelPaths, consts):\n fusionModel = None\n\n assert(len(modelPaths) == 2 or modelPaths is None)\n print(\"Model fusion\")\n \n if(not modelPaths is None):\n ModelA, modelAConfig = ReadModelConfig(modelPaths[0])\n ModelB, modelBConfig = ReadModelConfig(modelPaths[1])\n else:\n consts.noFeature = False\n modelAConfig = consts\n consts.noFeature = True\n modelBConfig = consts\n\n in_RGB = None\n if(not modelAConfig.noFeature or not modelBConfig.noFeature):\n in_RGB = Input(shape=(Const.npoints, consts.featureComponents), dtype=tf.float32, name = \"In_RGB\") #features\n in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32, name = \"In_pts\") #points\n\n newModelA = CreateModelCopy(ModelA, modelAConfig, in_pts, in_RGB)\n newModelB = CreateModelCopy(ModelB, modelBConfig, in_pts, in_RGB)\n\n x = tf.concat((newModelA.output[0], newModelB.output[0]), axis = 2) #fuse features from both models\n\n x1, _ = PtConv(x, in_pts, K = 16, next_pts = Const.npoints, in_features = 2*128, out_features = 96)\n x2, _ = PtConv(x1, in_pts, K = 16, next_pts = Const.npoints, in_features = 96, out_features = 48)\n x0d = tf.concat([x2, newModelA.output[1], newModelB.output[1]], axis = 2)\n\n out_labels = tf.reshape(x0d, (-1, x0d.shape[2]))\n out_labels = Dropout(rate=0.5)(out_labels)\n out_labels = DenseInitialized(consts.classCount)(out_labels)\n out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))\n\n out_labels = tf.nn.softmax(out_labels)\n\n fusionModel = Model([in_pts] if in_RGB is None else [in_RGB, in_pts], out_labels, name =\"model\")\n\n nontrainableNames = [x.name for x in newModelA.layers] + [x.name for x in newModelB.layers]\n # 
nontrainableNames = [x.name for x in newModelA.layers]\n count = 0\n for i, layer in enumerate(fusionModel.layers):\n if(layer.name in nontrainableNames):\n layer.trainable = False\n count += 1\n\n PrintToLog(f\"{len(fusionModel.layers)-count}/{len(fusionModel.layers)} layers are trainable.\")\n\n fusionModel = CompileModel(fusionModel, consts.classCount)\n # fusionModel.summary()\n return fusionModel\n\nclass MIOU(tf.keras.metrics.Metric):\n \n def __init__(self, classCount, name='miou', **kwargs):\n super(MIOU, self).__init__(name=name, **kwargs)\n self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)\n self.classCount = classCount\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)\n PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)\n confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount) \n self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))\n\n def result(self):\n union = tf.linalg.diag_part(self.cm)\n rowSum = tf.math.reduce_sum(self.cm, axis = 0)\n colSum = tf.math.reduce_sum(self.cm, axis = 1)\n intersection = (colSum + rowSum - union)\n intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)\n iou = union / intersection\n miou = tf.expand_dims(tf.convert_to_tensor(tf.reduce_sum(iou) / tf.cast(iou.shape[0], dtype=np.float64)), 0)\n return tf.concat((tf.expand_dims(miou,1), tf.cast(tf.expand_dims(iou,1), tf.float64)), 0)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))\n\ndef moving_miou_metric(classCount):\n def moving_iou(y_true, y_pred):\n TrueLbl = tf.argmax(tf.reshape(y_true, [-1, classCount]), axis= 1)\n PredLbl = tf.argmax(tf.reshape(y_pred, [-1, classCount]), axis= 
1)\n\n cm = tf.math.confusion_matrix(TrueLbl, PredLbl, classCount)\n\n union = tf.linalg.diag_part(cm)\n\n rowSum = tf.math.reduce_sum(cm, axis = 0)\n colSum = tf.math.reduce_sum(cm, axis = 1)\n\n intersection = (colSum + rowSum - union)+1\n\n iou = union / intersection\n\n return tf.reduce_sum(iou) / tf.cast(tf.math.maximum(iou.shape[0], 1), dtype=np.float64)\n\n return moving_iou\n\nclass IOU(tf.keras.metrics.Metric):\n def __init__(self, classCount, classIndex, name='iou', **kwargs):\n super(IOU, self).__init__(name=name, **kwargs)\n self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)\n self.classCount = classCount\n self.classIndex = classIndex\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)\n PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)\n confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)\n self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))\n\n def result(self):\n union = tf.linalg.diag_part(self.cm)\n rowSum = tf.math.reduce_sum(self.cm, axis = 0)\n colSum = tf.math.reduce_sum(self.cm, axis = 1)\n intersection = (colSum + rowSum - union)\n intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)\n iou = union / intersection\n return tf.cast(tf.expand_dims(iou, 1)[self.classIndex], tf.float64)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))\n\ndef weighted_categorical_crossentropy(weights):\n # weights = [0.9,0.05,0.04,0.01]\n def wcce(y_true, y_pred):\n Kweights = tf.constant(weights)\n y_true = tf.cast(y_true, y_pred.dtype)\n return tf.keras.losses.categorical_crossentropy(y_true, y_pred) * tf.math.reduce_sum(y_true * Kweights, axis=-1)\n\n return wcce\n\ndef 
CompileModel(model, classCount):\n model.compile(\n optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, epsilon = 1e-8),\n loss = tf.keras.losses.CategoricalCrossentropy(),\n # loss = weighted_categorical_crossentropy([0.7, 5]),\n metrics= [IOU(classCount, 0, name=\"other\"), IOU(classCount, 1, name=\"curb\")] if classCount == 2 else [MIOU(classCount)]\n )\n return model\n\nclass IOUPerClass(tf.keras.callbacks.Callback):\n def __init__(self, plot_path, classNames, firstEpoch = 0, metric = \"miou\"):\n self.metric = metric\n self.epoch = firstEpoch \n self.classCount = len(classNames)\n self.classNames = classNames\n self.path = plot_path\n\n print(f\"IOU logs path: {self.path}\")\n\n self.writers = []\n self.val_writers = []\n ioupath = os.path.join(plot_path, \"iou\")\n os.makedirs(ioupath, exist_ok=True)\n for i in range(self.classCount):\n path = os.path.join(ioupath, classNames[i])\n os.makedirs(path, exist_ok=True)\n self.writers.append(tf.summary.create_file_writer(path))\n\n path = os.path.join(ioupath, \"val_\"+classNames[i])\n os.makedirs(path, exist_ok=True)\n self.val_writers.append(tf.summary.create_file_writer(path))\n # print(\"Writer path: \", path)\n \n self.InitializeMIOUWriter() \n\n def InitializeMIOUWriter(self):\n mioupath = os.path.join(self.path, \"miou\")\n os.makedirs(mioupath, exist_ok=True)\n\n path = os.path.join(mioupath, \"miou\")\n os.makedirs(path, exist_ok=True)\n self.miou_writer = tf.summary.create_file_writer(path)\n\n path = os.path.join(mioupath, \"val_miou\")\n os.makedirs(path, exist_ok=True)\n self.val_miou_writer = tf.summary.create_file_writer(path)\n \n def WriteLog(self, writer, metric, logs, epoch, tag = \"miou\"):\n value = logs.get(metric)\n if(value is None):\n print(f\"Failed getting {metric} log\")\n return False\n \n with writer.as_default():\n tf.summary.scalar(tag, value[0][0], step=epoch)\n writer.flush()\n\n def WriteLogs(self, writers, metric, logs, epoch, tag = \"iou\"):\n metrix = logs.get(metric)\n 
if(metrix is None):\n print(f\"Failed getting {metric} log\")\n return False\n\n iou = [i[0] for i in metrix[len(metrix)-self.classCount:]]\n for i in range(len(iou)):\n with writers[i].as_default():\n tf.summary.scalar(tag, iou[i], step=epoch)\n writers[i].flush()\n \n def on_epoch_end(self, batch, logs=None):\n self.WriteLogs(self.writers, self.metric, logs, self.epoch)\n self.WriteLogs(self.val_writers, \"val_\"+self.metric, logs, self.epoch)\n\n self.WriteLog(self.miou_writer, self.metric, logs, self.epoch)\n self.WriteLog(self.val_miou_writer, \"val_\"+self.metric, logs, self.epoch)\n self.epoch += 1\n\nlogSaveDir = \"\"\ndef WriteToLog(msg):\n if(os.path.isdir(logSaveDir)):\n logFile = open(logSaveDir+f\"/training.log\", \"a\")\n logFile.write(msg+\"\\n\")\n logFile.close()\n\ndef PrintToLog(msg):\n print(msg)\n WriteToLog(msg)\n\nclass ModelSaveCallback(tf.keras.callbacks.Callback):\n def __init__(self, saveDir, trainingSteps, metric = \"accuracy\", modelNamePrefix = \"\", sendNotifications = False):\n super().__init__()\n self.saveDir = saveDir\n self.metric = metric\n self.modelNamePrefix = modelNamePrefix\n\n self.epoch = 0\n self.trainingSteps = trainingSteps\n \n self.sendNotifications = sendNotifications\n if(self.sendNotifications):\n self.notifyDevice = Notify()\n \n os.makedirs(self.saveDir, exist_ok=True)\n WriteToLog(f\"Training: {modelNamePrefix}\")\n\n def on_epoch_end(self, epoch, logs=None):\n self.epoch = epoch + 1\n if(len(logs) > 0):\n miou = logs.get(self.metric)[0]*100\n val_metric = \"val_\"+self.metric\n val_miou = logs.get(val_metric)[0]*100\n SaveModel(self.saveDir, epoch, self.model, miou, val_miou, self.modelNamePrefix)\n\n message = \"Ep: {0}. {1}: {2:.3}%. {3}: {4:.3}%\".format(self.epoch, self.metric, miou, val_metric, val_miou)\n WriteToLog(message)\n\n f = open(\"demofile3.txt\", \"w\")\n f.write(\"Woops! 
I have deleted the content!\")\n f.close()\n\n if(self.sendNotifications):\n try: \n self.notifyDevice.send(self.modelNamePrefix + \" \" + message)\n except:\n print(\"notifyDevice error\")\n \n # def on_batch_end(self, batch, logs=None):\n # progress = batch/self.trainingSteps * 100\n # if(progress % 10 == 0):\n # try:\n # message = \"Ep. {0} {1}% done. {2}: {3:.3}%\".format(self.epoch+1, int(progress), self.metric, logs.get(self.metric)*100)\n # self.notifyDevice.send(message)\n # except:\n # print(\"notifyDevice error\")\n\ndef ParseEpoch(modelPath):\n filename = os.path.basename(modelPath)\n return int(filename.split(\"_\")[2])\n\ndef GetValidationData(testFiles, consts, batchesCount = 100, newDataGeneration = False):\n print(\"Gathering validation data...\")\n print(f\"Test files: {testFiles}\")\n\n if(newDataGeneration):\n PrintToLog(\"Use TestSequence for validation.\")\n\n assert(len(testFiles) == 1)\n seq = TestSequence(testFiles[0], consts, test = True)\n else:\n PrintToLog(\"Use TrainSequence for validation.\")\n\n seq = TrainSequence(testFiles, batchesCount, consts, dataAugmentation = False) \n \n if not consts.noFeature:\n ftsList = np.zeros((0, consts.npoints, consts.featureComponents), np.float32)\n ptsList = np.zeros((0, consts.npoints, 3), np.float32)\n lbsList = np.zeros((0, consts.npoints, consts.classCount), np.uint8)\n\n if(newDataGeneration):\n indexes = np.arange(min(batchesCount, len(seq)))\n np.random.shuffle(indexes)\n else:\n indexes = range(batchesCount)\n\n for i in indexes:\n if consts.noFeature:\n if(newDataGeneration):\n ptslbl = seq.__getitem__(i)\n else:\n pts, lbl = seq.__getitem__(i)\n ptslbl = [pts[0], lbl]\n \n ptsList = np.concatenate((ptsList, ptslbl[0]), 0)\n lbsList = np.concatenate((lbsList, ptslbl[1]), 0)\n else:\n if(newDataGeneration):\n ftsptslbl = seq.__getitem__(i)\n else:\n ftspts, lbl = seq.__getitem__(i)\n ftsptslbl = [ftspts[0], ftspts[1], lbl]\n \n ftsList = np.concatenate((ftsList, ftsptslbl[0]), 0)\n ptsList = 
np.concatenate((ptsList, ftsptslbl[1]), 0)\n lbsList = np.concatenate((lbsList, ftsptslbl[2]), 0)\n \n PrintToLog(f\"Generated {len(lbsList)} validation samples.\")\n\n if consts.noFeature:\n return (ptsList, lbsList)\n else:\n return ([ftsList, ptsList], lbsList)\n \ndef TrainModel(trainFiles, testFiles, consts : Const, modelPath = None, saveDir = Paths.dataPath, classes = None, first_epoch = 0, epochs = None, sendNotifications = False): \n model = None\n modelName = None\n if(modelPath != None):\n if(not isinstance(modelPath, list)):\n modelName = Const.ParseModelName(modelPath)\n if(consts.Name() != Const.RemoveUID(modelName)):\n modelName = consts.Name(consts.UID()) \n logSaveDir = saveDir + f\"/{modelName}/\"\n\n if(isinstance(modelPath, list)):\n model = FuseModels(modelPath, consts)\n else:\n model, modified = LoadModel(modelPath, consts)\n if(not modified):\n first_epoch = ParseEpoch(modelPath) +1\n else:\n if(consts.Fusion):\n model = FuseModels(None, consts)\n else:\n model = CreateModel(consts.classCount, 1 if consts.noFeature else consts.featureComponents, noColor=consts.noFeature)\n \n if(modelName is None or modelName == \"\"):\n modelName = consts.Name(consts.UID())\n logSaveDir = saveDir + f\"/{modelName}/\"\n\n PrintToLog(\"Train {} on {} files. 
Test on {} files\".format(modelName, len(trainFiles), len(testFiles)))\n PrintToLog(\"Validate on :\" + str(testFiles))\n\n trainingSteps = int((1000*16)/consts.batchSize) if not Const.IsWindowsMachine() else int(10)\n PrintToLog(\"Batch size: {}, trainingSteps: {}\".format(consts.batchSize, trainingSteps))\n\n logsPath = os.path.join(consts.logsPath, Const.RemoveUID(modelName))\n os.makedirs(logsPath, exist_ok=True)\n callbacks_list = [] \n callbacks_list.append(ModelSaveCallback(logSaveDir, trainingSteps, \"curb\", modelNamePrefix = modelName, sendNotifications=sendNotifications))\n # callbacks_list.append(IOUPerClass(logsPath, consts.classNames[1:], first_epoch+1))\n # callbacks_list.append(tf.keras.callbacks.TensorBoard(log_dir=logsPath, update_freq=\"batch\", histogram_freq=0, profile_batch = 0)) # tensorboard 2.0.2\n\n seq = TrainSequence(trainFiles, trainingSteps, consts)\n validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)\n validationData = None if len(testFiles) == 0 else GetValidationData(testFiles, consts, validationSteps)\n\n if(epochs is None):\n epochs = 20 if consts.Fusion else 100\n\n model.fit(seq, validation_data = validationData, epochs = epochs, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300, callbacks=callbacks_list, initial_epoch = first_epoch)\n\ndef EvaluateModels(modelsList, testFiles, consts, x = None, y = None):\n if(x is None or y is None):\n validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)\n x, y = GetValidationData(testFiles, consts, validationSteps, newDataGeneration = False)\n\n for file in modelsList:\n model, _ = LoadModel(file, consts)\n metrics = model.evaluate(x, y, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300)\n # print(f\"miou: {metrics[2][0][0]*100:.3}\")\n\ndef SaveModel(saveDir, epoch, model, train_score, val_score=0, modelNamePrefix = \"\"):\n if(modelNamePrefix != 
\"\"):\n modelNamePrefix += \"_\"\n fileName = saveDir+\"/{0}{1}{2}{3}.h5\".format(modelNamePrefix, epoch, f\"_train({train_score:.3})\", f\"_val({val_score:.3})\" if val_score != 0 else \"\")\n if(not os.path.isdir(saveDir)):\n os.mkdir(saveDir) \n if(os.path.exists(fileName)):\n os.remove(fileName) \n model.save(fileName, include_optimizer=False)\n\ndef RotatePointCloud(batch_data):\n \"\"\" Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, sinval, 0],\n [-sinval, cosval, 0],\n [0, 0, 1],])\n return np.dot(batch_data, rotation_matrix)\n\ndef JitterRGB(features):\n features = features.astype(np.uint8)\n assert(np.max(features) > 1)\n\n img = Image.fromarray(np.expand_dims(features,0), mode=\"RGB\")\n\n low = 0.4\n high = 1.6\n #1 is baseline\n img = ImageEnhance.Brightness(img).enhance(np.random.uniform(low, high))\n img = ImageEnhance.Color(img).enhance(np.random.uniform(low, high))\n img = ImageEnhance.Contrast(img).enhance(np.random.uniform(low, high))\n\n img = ImageEnhance.Sharpness(img).enhance(np.random.uniform(low, high))\n if(np.random.uniform(low, high) > 1):\n img = ImageOps.equalize(img) \n if(np.random.uniform(low, high) > 1):\n img = ImageOps.autocontrast(img)\n\n new_features = np.array(img).reshape((-1, 3))\n return new_features\n\ndef JitterReflectance(features, sigma=40): #input [0; 255]\n assert(features.shape[1] == 1)\n randJitters = np.random.randint(-sigma, sigma, size = features.shape)\n features += randJitters\n features = np.clip(features, 0, 255)\n return features\n\ndef JitterPoints(points, sigma=0.01):\n \"\"\" Randomly jitter points. 
jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n \"\"\" \n C = 3\n assert(points.shape[1] == C)\n\n randJitters = np.random.uniform(-sigma, sigma, size = points.shape)\n return points + randJitters\n\ndef Mirror(points, axis, min = True):\n if(min):\n axisValue = np.amin(points[:,axis])\n else:\n axisValue = np.amax(points[:,axis])\n\n distances = np.abs(points[:, axis] - axisValue)\n newpoints = np.array(points, copy=True)\n\n newpoints[:,axis] = newpoints[:,axis] + distances*(-2 if min else 2)\n return newpoints\n\ndef MirrorPoints(points): \n assert(len(points.shape) == 2 and points.shape[1] == 3)\n\n mirrorDirection = random.choice([\"xMin\", \"xMax\", \"yMin\", \"yMax\", \"\"])\n\n if(mirrorDirection == \"xMin\"):\n points = Mirror(points, 0, min = True)\n elif(mirrorDirection == \"xMax\"):\n points = Mirror(points, 0, min = False)\n elif(mirrorDirection == \"yMin\"):\n points = Mirror(points, 1, min = True)\n elif(mirrorDirection == \"yMax\"):\n points = Mirror(points, 1, min = False)\n \n return points\n\ndef ScalePoints(points, sigma = 0.02):\n \"\"\" Scale up or down random by small percentage\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, scaled batch of point clouds\n \"\"\"\n assert(points.shape[1]==3)\n\n scale = np.random.uniform(1-sigma, 1+sigma)\n scale_matrix = np.array([[scale, 0, 0],\n [0, scale, 0],\n [0, 0, scale]])\n scaled = np.dot(points, scale_matrix)\n\n return scaled\n\nclass TrainSequence(Sequence):\n def __init__(self, filelist, iteration_number, consts : Const, dataAugmentation = True):\n self.filelist = filelist\n self.ptsList = [np.load(file) for file in self.filelist]\n self.ptsList = sorted(self.ptsList, key=len)\n self.ptsListCount = np.cumsum([len(pts) for pts in self.ptsList])\n\n self.cts = consts\n self.dataAugmentation = dataAugmentation\n self.iterations = iteration_number\n\n def __len__(self):\n return 
int(self.iterations)\n\n def PickRandomPoint(self, lbl):\n lblIdx = []\n\n while True:\n randClass = random.randint(0, self.cts.classCount-1)\n lblIdx = np.where(lbl == randClass)[0]\n\n if(len(lblIdx) >= 2):\n break\n\n return lblIdx[random.randint(0, len(lblIdx)-1)] \n\n def __getitem__(self, _):\n if not self.cts.noFeature:\n ftsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.featureComponents), np.float32) \n ptsList = np.zeros((self.cts.batchSize, self.cts.npoints, 3), np.float32)\n lbsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.classCount), np.uint8)\n \n for i in range(self.cts.batchSize):\n # load the data\n ptIdx = random.randint(0, self.ptsListCount[-1])\n pts = self.ptsList[np.argmax(self.ptsListCount >= ptIdx)]\n \n # if(self.cts.featureComponents == 1):\n # keepPts = (pts[:, 4] != 0)\n # else:\n # keepPts = (pts[:, 6] != 0)\n # pts = pts[keepPts]\n\n # get the features\n if(self.cts.featureComponents == 1):\n if not self.cts.noFeature: \n fts = np.expand_dims(pts[:,3], 1).astype(np.float32)\n lbs = pts[:,4].astype(int)\n else:\n if not self.cts.noFeature:\n fts = pts[:,3:6].astype(np.float32)\n lbs = pts[:,6].astype(int)\n\n if(np.min(lbs) == 1):\n lbs -= 1 #class 0 is filtered out\n \n # get the point coordinates\n pts = pts[:, :3]\n\n # pick a random point\n pt_id = random.randint(0, pts.shape[0]-1)\n pt = pts[pt_id]\n\n # create the mask\n mask_x = np.logical_and(pts[:,0]<pt[0]+self.cts.blocksize/2, pts[:,0]>pt[0]-self.cts.blocksize/2)\n mask_y = np.logical_and(pts[:,1]<pt[1]+self.cts.blocksize/2, pts[:,1]>pt[1]-self.cts.blocksize/2)\n mask = np.logical_and(mask_x, mask_y)\n temppts = pts[mask]\n templbs = lbs[mask]\n if not self.cts.noFeature:\n tempfts = fts[mask]\n \n # random selection\n choice = np.random.choice(temppts.shape[0], self.cts.npoints, replace=True)\n temppts = temppts[choice] \n if not self.cts.noFeature: \n tempfts = tempfts[choice]\n\n templbs = templbs[choice]\n encodedLbs = 
np.zeros((len(templbs), self.cts.classCount))\n encodedLbs[np.arange(len(templbs)),templbs] = 1\n templbs = encodedLbs\n\n # if self.dataAugmentation:\n # dt = DataTool()\n # dt.VisualizePointCloudAsync([temppts], [tempfts/255])\n\n # data augmentation\n if self.dataAugmentation:\n if(self.cts.Mirror):\n temppts = MirrorPoints(temppts)\n if(self.cts.Rotate):\n temppts = RotatePointCloud(temppts)\n if(self.cts.Scale):\n temppts = ScalePoints(temppts, sigma = 0.02)\n if(self.cts.Jitter):\n temppts = JitterPoints(temppts, sigma = 0.01)\n\n if(not self.cts.noFeature and self.cts.FtrAugment):\n if(self.cts.featureComponents == 3):\n tempfts = JitterRGB(tempfts)\n elif(self.cts.featureComponents == 1):\n tempfts = JitterReflectance(tempfts)\n \n if(not self.cts.noFeature):\n tempfts = tempfts.astype(np.float32)\n tempfts = tempfts/255 # - 0.5\n \n # if self.dataAugmentation:\n # # visualize data\n # dt = DataTool()\n # dt.VisualizePointCloud([temppts], [tempfts], windowName = \"Augmented\")\n # linePoints = np.where(templbs[:, 1] == 1)[0]\n # DataTool().VisualizePointCloud([np.delete(temppts, linePoints, axis=0), temppts[linePoints]], [[0,0,1], [1,0,0]], windowName=\"Sampled\")\n\n if not self.cts.noFeature:\n ftsList[i] = np.expand_dims(tempfts, 0)\n ptsList[i] = np.expand_dims(temppts, 0)\n lbsList[i] = np.expand_dims(templbs, 0)\n \n if self.cts.noFeature:\n return [ptsList], lbsList\n else: # works for RGB and fusion models\n return [ftsList, ptsList], lbsList\n\nclass TestSequence(Sequence):\n def __init__(self, filename, consts, splitDataSetToParts = -1, windowsMachineCap = True, test = False):\n self.filename = filename\n self.batchSize = consts.batchSize\n self.npoints = consts.npoints\n self.nocolor = consts.noFeature\n self.bs = consts.blocksize\n self.featureComponents = consts.featureComponents\n self.fusion = consts.Fusion\n self.test = test\n\n if(self.test):\n self.classCount = consts.classCount\n self.lbl = []\n\n if(self.filename.endswith(\".ply\")):\n 
from plyfile import PlyData\n plydata = PlyData.read(self.filename)\n x = plydata[\"vertex\"].data[\"x\"].astype(np.float32)\n y = plydata[\"vertex\"].data[\"y\"].astype(np.float32)\n z = plydata[\"vertex\"].data[\"z\"].astype(np.float32)\n fts = plydata[\"vertex\"].data[\"reflectance\"].astype(np.float32)\n self.xyzrgb = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1), np.expand_dims(fts, 1)), axis=1)\n elif(self.filename.endswith(\".npy\")):\n xyzftsl = np.load(self.filename)\n if(xyzftsl.shape[1] == 5):\n self.xyzrgb = xyzftsl[:, :4]\n if(self.test):\n self.lbl = xyzftsl[:, 4] - 1\n else: #if(xyzftsl.shape[1] == 7):\n self.xyzrgb = xyzftsl[:, :6]\n if(self.test):\n self.lbl = xyzftsl[:, 6] - 1\n elif(self.filename.endswith(\".las\")):\n from dataTool import ReadXYZRGB \n xyz, rgb = ReadXYZRGB(self.filename)\n self.xyzrgb = np.concatenate((xyz, rgb), 1)\n\n print(\"Test_step:\", consts.test_step)\n step = consts.test_step\n discretized = ((self.xyzrgb[:,:2]).astype(float)/step).astype(int)\n self.allpts = np.unique(discretized, axis=0)\n self.allpts = self.allpts.astype(np.float)*step\n\n if(consts.IsWindowsMachine() and windowsMachineCap):\n self.allpts = self.allpts[:115] #small sample for testing\n\n self.splitDataSetToParts = splitDataSetToParts\n if(self.splitDataSetToParts != -1):\n self.ptIndex = 0\n else:\n self.pts = self.allpts\n self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)\n\n self.sparseCubes = 0\n self.sparseCubesPtCount = 0\n\n def LenParts(self):\n if(self.splitDataSetToParts != -1):\n return math.ceil(len(self.allpts)/self.splitDataSetToParts)\n else:\n return 1\n\n def NextPart(self):\n if(self.splitDataSetToParts <= 0):\n return False\n if(self.ptIndex >= len(self.allpts)):\n return False\n\n self.nextIndex = np.min([self.ptIndex+self.splitDataSetToParts, len(self.allpts)])\n self.pts = self.allpts[self.ptIndex : self.nextIndex]\n self.ptIndex = self.nextIndex\n\n self.idxList = 
np.zeros((len(self.pts), self.npoints), np.int64)\n return True\n\n def __len__(self):\n return math.ceil(len(self.pts)/self.batchSize)\n\n def compute_mask(self, pt, bs):\n # build the mask\n mask_x = np.logical_and(self.xyzrgb[:,0]<pt[0]+bs/2, self.xyzrgb[:,0]>pt[0]-bs/2)\n mask_y = np.logical_and(self.xyzrgb[:,1]<pt[1]+bs/2, self.xyzrgb[:,1]>pt[1]-bs/2)\n mask = np.logical_and(mask_x, mask_y)\n return mask \n\n def __getitem__(self, index):\n size = min(self.batchSize, len(self.pts) - (index * self.batchSize))\n\n if not self.nocolor:\n ftsList = np.zeros((size, self.npoints, self.featureComponents), np.float32)\n ptsList = np.zeros((size, self.npoints, 3), np.float32)\n if(self.test):\n lblList = np.zeros((size, self.npoints, self.classCount), np.float32)\n \n for i in range(size):\n # get the data \n mask = self.compute_mask(self.pts[index*self.batchSize + i], self.bs)\n pts = self.xyzrgb[mask]\n\n if(self.test):\n lbl = self.lbl[mask]\n\n if(len(pts) < self.npoints):\n self.sparseCubes += 1\n self.sparseCubesPtCount += len(pts)\n\n # choose right number of points\n choice = np.random.choice(pts.shape[0], self.npoints, replace=True)\n pts = pts[choice]\n if(self.test):\n lbl = lbl[choice]\n\n # labels will contain indices in the original point cloud\n idx = np.where(mask)[0][choice]\n self.idxList[index*self.batchSize + i] = np.expand_dims(idx, 0)\n\n # separate between features and points\n if not self.nocolor:\n if(self.featureComponents == 1):\n fts = np.expand_dims(pts[:,3], 1)\n else:\n fts = pts[:,3:6]\n fts = fts/255 #- 0.5\n\n pts = pts[:, :3].copy()\n\n if not self.nocolor:\n ftsList[i] = np.expand_dims(fts, 0)\n ptsList[i] = np.expand_dims(pts, 0)\n if self.test:\n lblList[i, np.arange(len(lbl)), lbl.astype(int)] = 1\n\n add_lbl = []\n if self.test:\n add_lbl = [lblList]\n\n if self.nocolor:\n return [ptsList] + add_lbl\n else: #works for RGB\n return [ftsList, ptsList] + add_lbl\n\ndef GenerateData(modelPath, testFiles, consts, outputFolder, 
NameIncludeModelInfo = False):\n model, _ = LoadModel(modelPath, consts)\n\n if(not NameIncludeModelInfo):\n outputFolder = os.path.join(outputFolder, Paths.FileName(modelPath))\n os.makedirs(outputFolder, exist_ok=True)\n\n for file in testFiles:\n t = time()\n\n baseName = Paths.FileName(file)\n if(NameIncludeModelInfo):\n baseName = baseName + \"_\" + Paths.FileName(modelPath)\n baseName += \".txt\"\n\n newFile = os.path.join(outputFolder, baseName)\n if(os.path.exists(newFile)):\n print(\"All ready exists: \",newFile)\n continue\n else:\n open(newFile, \"a\").close()\n\n print(\"Generating: \", newFile)\n GenerateFile(model, file, consts, newFile)\n print(\"Done in {:02d}:{:02d} min.\".format(int((time() - t)/60), int((time() - t)%60)))\n\ndef GenerateLargeData(modelPath, voxelFiles, consts, outputFolder, orgFiles = None, replace = False, Upscale = True, NameIncludeModelInfo = False):\n from time import time\n\n model, _ = LoadModel(modelPath, consts)\n\n if(not NameIncludeModelInfo):\n outputFolder = outputFolder + Paths.FileName(modelPath)\n if not Upscale:\n outputFolder = outputFolder+\"/vox_lbl/\"\n os.makedirs(outputFolder, exist_ok=True)\n\n if isinstance(voxelFiles, str):\n voxelFiles = Paths.GetFiles(voxelFiles)\n \n if isinstance(orgFiles, str):\n orgFiles = Paths.GetFiles(orgFiles)\n \n for voxelFile in voxelFiles:\n baseName = Paths.FileName(voxelFile).replace(\"_voxels\", \"\")\n\n if not (orgFiles is None):\n orgFile = [f for f in orgFiles if Paths.FileName(f).startswith(baseName)]\n if(len(orgFile) != 1):\n print(\"Skip: \", voxelFile)\n continue\n orgFile = orgFile[0]\n else:\n orgFile = None\n\n t = time()\n\n if(NameIncludeModelInfo):\n baseName = baseName + \"_\" + Paths.FileName(modelPath)\n \n if Upscale: \n newFile = os.path.join(outputFolder, baseName+\".labels\")\n else: \n newFile = os.path.join(outputFolder, baseName+\".npy\")\n if(not replace and os.path.exists(newFile)):\n print(newFile,\" already exists.\")\n continue\n \n flagFile 
= newFile+\".tmp\"\n if(os.path.exists(flagFile)):\n print(\"Other worker is generating: \", newFile)\n continue\n else:\n open(flagFile, \"a\").close()\n\n print(\"Generating: \", newFile)\n GenerateLargeFile(model, voxelFile, orgFile, consts, newFile, Upscale = Upscale)\n\n os.remove(flagFile)\n print(\"{} generated in {:02d}:{:02d} min.\".format(baseName, int((time() - t)/60), int((time() - t)%60)))\n\ndef GenerateFile(model, file, consts, outputFile, saveScores = True):\n seq = TestSequence(file, consts)\n output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)\n\n # for y in range(len(seq)):\n # pts = seq.__getitem__(y)\n # pts = pts[0]\n # pred = model.predict(pts)\n\n # for i in range(len(pred)):\n # predPtsIdx = np.where(np.argmax(pred[i], axis = 1) == 1)[0]\n # # truePtsIdx = np.where(np.argmax(lbl[i], axis = 1) == 1)[0]\n \n # # print(f\"True curb points: {len(truePtsIdx)}. Found curb points: {len(predPtsIdx)}\")\n # DataTool().VisualizePointCloud([np.delete(pts[i], predPtsIdx, axis=0), pts[i][predPtsIdx]], [[0,0,1], [1,0,0]])\n\n idx = seq.idxList\n xyzrgb = seq.xyzrgb[:,:3]\n scores = np.zeros((xyzrgb.shape[0], consts.classCount))\n\n for i in range(len(output)):\n scores[idx[i]] += output[i] \n\n mask = np.logical_not(scores.sum(1)==0)\n scores = scores[mask]\n pts_src = xyzrgb[mask]\n\n # create the scores for all points\n indexes = nearest_correspondance(pts_src.astype(np.float32), xyzrgb.astype(np.float32), K=1)\n scores = scores[indexes]\n \n if saveScores:\n scoresFile = outputFile.replace(\".txt\", \"_scores.npy\")\n np.save(scoresFile, scores)\n print(f\"Scores saved to: {scoresFile}\")\n \n scores = scores.argmax(1) + 1 #because all class are shifted to avoid 0 - unclassified\n \n print(f\"class 0: {len(np.where(scores == 0)[0])}, class 1: {len(np.where(scores == 1)[0])}\")\n\n import pandas as pd\n print(\"Save labels: \", scores.shape)\n pd.DataFrame(scores, dtype=np.uint8).to_csv(outputFile, sep='\\t', 
header=None, index=None)\n\ndef SaveLabelsPnts(labels, outputFile):\n import pandas as pd \n print(\"Saving pts lbs...\")\n if(len(labels.shape) == 1):\n pd.DataFrame(labels, dtype=np.uint8).to_csv(outputFile, sep='\\t', header=None, index=None)\n else:\n np.save(outputFile, labels)\n print(\"Pts lbs {} saved!\".format(labels.shape))\n\ndef UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile = None):\n from tqdm import tqdm\n # create the scores for all points\n step = 10000000 #1000000\n fullLbl = np.zeros((0,), np.int8)\n print(\"KDTree magic. Source pts: {}. Queary pts: {}\".format(len(pts_src), len(originalPoints)))\n for i in tqdm(range(0, math.ceil(len(originalPoints)/step))):\n a = i*step\n b = a + np.min([len(originalPoints)-a, step])\n indexes = nearest_correspondance(pts_src, originalPoints[a:b], K=1)\n fullLbl = np.concatenate([fullLbl, lbl[indexes]], 0)\n\n if(not (outputFile is None)):\n SaveLabelsPnts(fullLbl, outputFile)\n else:\n return fullLbl\n\ndef GenerateLargeFile(model, voxelFile, originalFile, consts, outputFile, Upscale = True, saveScores = True):\n from dataTool import ReadXYZ\n from tqdm import tqdm\n\n seq = TestSequence(voxelFile, consts, splitDataSetToParts=16000)\n print(\"All pts: \", len(seq.allpts))\n\n xyzrgb = seq.xyzrgb[:,:3]\n scores = np.zeros((xyzrgb.shape[0], consts.classCount))\n\n for _ in tqdm(range(seq.LenParts())):\n seq.NextPart()\n output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)\n\n idx = seq.idxList\n for i in range(len(output)):\n scores[idx[i]] += output[i]\n\n mask = np.logical_not(scores.sum(1)==0)\n scores = scores[mask]\n pts_src = xyzrgb[mask].astype(np.float32)\n\n if saveScores:\n scoresFile = os.path.splitext(outputFile)[0]+\"_scores.npy\"\n np.save(scoresFile, scores)\n print(f\"Scores saved to: {scoresFile}\")\n\n lbl = scores.argmax(1)\n \n if(Upscale and not (originalFile is None)):\n print(\"Load original file: \", originalFile)\n originalPoints = 
ReadXYZ(originalFile).astype(np.float32)\n assert(originalPoints.shape[1] == 3)\n UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile)\n else: \n SaveLabelsPnts(np.concatenate([pts_src, np.expand_dims(lbl, 1)], axis=1), outputFile)\n\ndef UpscaleFilesAsync(modelPath, voxelFolder, orgFolder, savePath):\n import time\n # notifyDevice = Notify()\n\n savePath = savePath + Paths.FileName(modelPath)\n\n print(f\"Searching in folder: {savePath+'/vox_lbl/'}\")\n\n while True:\n found = False\n\n fileNames = Semantic3D.fileNames\n for file in Paths.GetFiles(savePath, onlyNames=True, withoutExtension=True, findExtesions=('.labels')):\n if(file in fileNames or fileNames.values()):\n fileNames = {key:val for key, val in fileNames.items() if val != file and key != file}\n \n if(len(fileNames) == 0): \n print(\"Done upscaling files\")\n # notifyDevice.send(\"Done upscaling files\")\n return\n\n for file in Paths.GetFiles(savePath+\"/vox_lbl/\", onlyNames=True, withoutExtension=True, findExtesions=('.npy')): \n ptslbs = os.path.join(savePath+\"/vox_lbl/\", file+\".npy\")\n # originalFile = os.path.join(orgFolder, file+\".npy\")\n originalFile = os.path.join(orgFolder, file+\".hdf5\")\n outputFile = os.path.join(savePath, file+\".labels\")\n\n if(not os.path.exists(outputFile)):\n found = True\n open(outputFile, \"a\").close()\n UpscaleFile(ptslbs, originalFile, outputFile)\n \n if not found:\n time.sleep(10) #sleep for 10 second and scan for job again\n\ndef UpscaleFile(ptslbsFile, originalFile, outputFile):\n from dataTool import ReadLabels, ReadXYZ\n\n print(\"Upscaling: {}\".format(ptslbsFile))\n scores = ReadLabels(ptslbsFile, readFormat = \".npy\")\n scores = np.squeeze(scores, 1)\n pts_src = ReadXYZ(ptslbsFile, readFormat = \".npy\")\n originalPoints = ReadXYZ(originalFile)\n\n UpscaleToOriginal(originalPoints, pts_src, scores, outputFile)\n\ndef nearest_correspondance(pts_src, pts_dest, K=1):\n # print(\"KDTree magic. Source pts: {}. 
Queary pts: {}\".format(len(pts_src), len(pts_dest)))\n # t = time()\n kdt = KDTree(pts_src, leaf_size=20)\n _, indexes = kdt.query(pts_dest, k = K)\n # print(\"Done in {}:{} min.\".format(int((time() - t)/60), int((time() - t)%60))) \n return np.squeeze(indexes, 1)\n\ndef TestTestSequence(path, consts): \n seq = TestSequence(path, consts)\n\n allPts = np.zeros((len(seq.xyzrgb), 3))\n\n for i in range(len(seq)):\n inpt = seq[i]\n\n ftsList = inpt[0]\n ptsList = inpt[1]\n\n for j in range(len(ptsList)):\n allPts[seq.idxList[i*consts.batchSize + j]] = ptsList[j]\n \n emptyPts = np.logical_not(allPts.sum(1) != 0)\n\n print(\"sparseCubes: \",seq.sparseCubes)\n print(\"mean sparseCubes pt count: \", seq.sparseCubesPtCount/seq.sparseCubes)\n print(\"Not picked points: {} => {:.2f}%\".format(len(emptyPts), len(emptyPts)/len(allPts)))\n\n nonEmptyPts = np.logical_not(emptyPts)\n\n a = seq.xyzrgb[emptyPts]\n b = seq.xyzrgb[nonEmptyPts]\n\n dt = DataTool()\n dt.VisualizePointCloud([a, b], [[1,0,0], None])\n\nif(os.path.exists(\"C:/Program Files\")):\n import open3d as o3d\n import time\n from dataTool import LoadRenderOptions, SaveRenderOptions, GetPointsIndexInBoundingBox, GetPointsInBoundingBox\n\nclass BoxesIterator:\n def __init__(self, boxes, points, colors, labels):\n # self.pc = o3d.geometry.PointCloud()\n # self.pc.points = o3d.utility.Vector3dVector(points)\n self.src_points = points\n self.src_colors = colors if np.max(colors) <= 1 else colors/255\n self.src_labels = labels\n self.dst_points = np.zeros((0, 3), dtype = np.float)\n self.dst_colors = np.zeros((0, 3), dtype = np.float)\n self.boxes = boxes\n self.i = 0\n # self.kdt = KDTree(points, leaf_size=20) \n\n self.trajectory = None\n # if(os.path.exists(\"./data/camera_trajectory.json\")):\n # self.trajectory = o3d.io.read_pinhole_camera_trajectory(\"./data/camera_trajectory.json\").parameters\n # self.trajectory_i = 0\n # self.trajectory_time = time.time()\n\n grey = np.array([128, 128, 128])/255\n red = 
np.array([136, 0, 1])/255\n mint = np.array([170, 255, 195])/255\n teal = np.array([0, 128, 128])/255\n green = np.array([60, 180, 75])/255\n verygreen = np.array([0, 255, 0])/255\n brown = np.array([170, 110, 40])/255\n # white = np.array([255, 255, 255])/255\n black = np.array([0, 0, 0])/255\n blue = np.array([0, 0, 255])/255 \n pink = np.array([255, 56, 152])/255 \n\n #NPM3D\n self.colors = []\n if(np.max(self.src_labels) == 9):\n self.colors = [grey, red, blue, teal, mint, brown, pink, black, green]\n #Semantic3D\n elif(np.max(self.src_labels) == 8):\n self.colors = [grey, verygreen, green, mint, red, blue, brown, black]\n \n self.pc = o3d.geometry.PointCloud() \n self.pc.points = o3d.utility.Vector3dVector(self.src_points)\n\n self.box = o3d.geometry.LineSet()\n lines = np.array([[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]])\n self.box.lines = o3d.utility.Vector2iVector(lines)\n self.box.colors = o3d.utility.Vector3dVector(np.array([[1,0,0] for _ in range(len(lines))]))\n\n self.initSet = False\n\n def ColorPtsByClass(self, pts, lbl):\n pts_colors = np.zeros((len(pts), 3), np.float)\n\n for i in range(0, len(self.colors)):\n indexes = np.where(lbl == i+1)[0]\n pts_colors[indexes] = self.colors[i]\n\n return pts_colors\n \n def BoxPts(self, bBox):\n box = [[bBox[0], bBox[2], bBox[4]], \n [bBox[1], bBox[2], bBox[4]], \n [bBox[0], bBox[3], bBox[4]], \n [bBox[1], bBox[3], bBox[4]],\n [bBox[0], bBox[2], bBox[5]], \n [bBox[1], bBox[2], bBox[5]], \n [bBox[0], bBox[3], bBox[5]], \n [bBox[1], bBox[3], bBox[5]]]\n return np.array(box)\n\n def AnimationFunction(self, vis):\n # time.sleep(0.2)\n if(self.i < len(self.boxes)): \n pts = self.src_points[:, :2]\n mask_x = np.logical_and(self.boxes[self.i][0]<pts[:,0], pts[:,0]<self.boxes[self.i][1])\n mask_y = np.logical_and(self.boxes[self.i][2]<pts[:,1], pts[:,1]<self.boxes[self.i][3])\n ptsIdx = np.where(np.logical_and(mask_x, mask_y))[0]\n randIdx = np.random.choice(ptsIdx, 
min(8192, len(ptsIdx)), replace=False)\n \n self.dst_points = np.concatenate((self.dst_points, self.src_points[randIdx]), axis = 0)\n self.dst_colors = np.concatenate((self.dst_colors, self.ColorPtsByClass(self.src_points[randIdx], self.src_labels[randIdx])), axis = 0)\n\n self.src_points = np.delete(self.src_points, randIdx, axis = 0)\n self.src_labels = np.delete(self.src_labels, randIdx, axis = 0)\n self.src_colors = np.delete(self.src_colors, randIdx, axis = 0)\n \n self.pc.points = o3d.utility.Vector3dVector(np.concatenate((self.src_points, self.dst_points), 0))\n self.pc.colors = o3d.utility.Vector3dVector(np.concatenate((self.src_colors, self.dst_colors), 0))\n\n self.box.points = o3d.utility.Vector3dVector(self.BoxPts(self.boxes[self.i]))\n\n vis.clear_geometries()\n vis.add_geometry(self.pc, False)\n vis.add_geometry(self.box, False)\n \n self.i += 1 \n # print(f\"{self.i}/{len(self.boxes)}\", end=\"\\r\")\n else:\n print(\"Iteration over.\")\n\n if(not os.path.exists(\"./data/camera_trajectory.json\")):\n self.trajectory = None\n\n if(self.trajectory is None):\n # vis = LoadRenderOptions(vis, returnVis=True)\n if(os.path.exists(\"./data/camera_trajectory.json\")):\n self.trajectory = o3d.io.read_pinhole_camera_trajectory(\"./data/camera_trajectory.json\").parameters\n self.trajectory_i = 0\n self.trajectory_time = time.time() \n else:\n ctr = vis.get_view_control()\n ctr.convert_from_pinhole_camera_parameters(self.trajectory[self.trajectory_i])\n if(self.trajectory_i < len(self.trajectory)-1): #and time.time() - self.trajectory_time > 1\n print(f\"Trajectory: {self.trajectory_i}/{len(self.trajectory)}\", end=\"\\r\")\n self.trajectory_i += 1\n self.trajectory_time = time.time()\n\n return False\n\ndef ShowSequenceBoxes(ptsFile, lblFile, consts):\n from dataTool import DataTool\n\n consts.test_step = 4\n seq = TestSequence(ptsFile, consts, windowsMachineCap=False)\n\n minZ = np.min(seq.xyzrgb[:,2])\n maxZ = np.max(seq.xyzrgb[:,2])\n\n boxes = []\n for pt 
in seq.pts:\n minX = pt[0] - consts.blocksize/2\n maxX = pt[0] + consts.blocksize/2\n \n minY = pt[1] - consts.blocksize/2\n maxY = pt[1] + consts.blocksize/2\n\n boxes.append([minX, maxX, minY, maxY, minZ, maxZ])\n\n dt = DataTool()\n # dt.VisualizePointCloud([seq.xyzrgb[:,:3]], [seq.xyzrgb[:,3:6]], bBoxes = boxes)\n boxesitr = BoxesIterator(boxes, seq.xyzrgb[:,:3], seq.xyzrgb[:,3:], np.squeeze(ReadLabels(lblFile),1))\n dt.VisualizePointCloud([seq.xyzrgb[:,:3]], animationFunction=boxesitr.AnimationFunction)\n # dt.VisualizePointCloud([seq.xyzrgb[:,:3]])\n\ndef RunExperiments():\n from dataTool import VisualizePointCloudClassesAsync, VisualizePointCloudClasses, ReadLabels, DataTool, ReadXYZ\n # testCloud = \"G:/PointCloud DataSets/NPM3D/test_10_classes/ajaccio_2.ply\"\n # testCloud = consts.Paths.processedTrain+\"/Lille1_1_0.npy\"\n # VisualizePointCloudClassesAsync(testCloud, downSample=False, windowName=\"Keras\")\n # VisualizePointCloudClassesAsync(testCloud, \"G:/PointCloud DataSets/NPM3D/generatedResults/ajaccio_2.txt\", downSample=False, windowName=\"Keras\")\n # VisualizePointCloudClassesAsync(testCloud, \"G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/ajaccio_2.txt\", downSample=False, windowName=\"Torch\")\n\n # TestTestSequence(consts.Paths.processedTrain+\"/Lille1_1_0.npy\", consts)\n # ShowSequenceBoxes(consts.Paths.processedTrain+\"/Lille1_1_0.npy\", consts)\n\n # # pts = ReadXYZ(consts.Paths.processedTrain+\"/Lille2_0.npy\")\n # true = ReadLabels(consts.Paths.processedTrain+\"/Lille2_0.npy\")\n\n # # pts = ReadXYZ(consts.Paths.rawTrain+\"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5\")\n # # true = ReadLabels(consts.Paths.rawTrain+\"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5\")\n\n # # pred_file = \"G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/Lille2_0.txt\"\n # pred_file = consts.Paths.generatedTest+\"/\"+Paths.FileName(modelPath)+\"/Lille2_0.txt\"\n # # pred_file = 
consts.Paths.generatedTest+\"/\"+Paths.FileName(modelPath)+\"/untermaederbrunnen_station3_xyz_intensity_rgb.labels\"\n # pred = ReadLabels(pred_file)\n \n # VisualizePointCloudClasses(consts.Paths.processedTrain+\"/Lille2_0.npy\",\n # pred_file,\n # downSample=False, windowName=\"Red error\",\n # errorPoints = ((true != pred) == (true != 0)),\n # delPoints = (true == 0))\n\n # error = np.where(true == 0)[0]\n # true = np.delete(true, error, 0)\n # pred = np.delete(pred, error, 0)\n\n # from sklearn.metrics import confusion_matrix\n # import metrics\n # cm = confusion_matrix(true, pred, labels=list(range(consts.classCount)))\n # iou = metrics.stats_iou_per_class(cm)\n # print(\"Mean iou:\", iou[0])\n # print(\"iou per class:\", iou[1])\n\n from dataTool import ReadXYZ, ReadLabels\n from sklearn.metrics import confusion_matrix\n from metrics import stats_accuracy_per_class, stats_iou_per_class\n\n src_pts = ReadXYZ(r\"G:\\PointCloud DataSets\\semantic3d\\rawTrain\\bildstein_station3_xyz_intensity_rgb.hdf5\")\n src_lbl = ReadLabels(r\"G:\\PointCloud DataSets\\semantic3d\\rawTrain\\bildstein_station3_xyz_intensity_rgb.hdf5\")\n src_lbl = np.squeeze(src_lbl, 1)\n\n delIndices = np.where(src_lbl == 0)\n src_pts = np.delete(src_pts, delIndices, axis=0)\n src_lbl = np.delete(src_lbl, delIndices, axis=0)\n\n voxel_pts = ReadXYZ(r\"G:\\PointCloud DataSets\\semantic3d\\processedTrain(0.15m)\\bildstein_station3_xyz_intensity_rgb_voxels.npy\")\n voxel_lbl = ReadLabels(r\"G:\\PointCloud DataSets\\semantic3d\\processedTrain(0.15m)\\bildstein_station3_xyz_intensity_rgb_voxels.npy\")\n voxel_lbl = np.squeeze(voxel_lbl, 1)\n\n upscaled_lbl = UpscaleToOriginal(src_pts, voxel_pts, voxel_lbl)\n\n cm = confusion_matrix(src_lbl, upscaled_lbl)\n avg_acc, avg_class = stats_accuracy_per_class(cm)\n avg_iou, avg_iou_class = stats_iou_per_class(cm)\n\ndef RenameSemantic3DFiles(folder):\n if(len(Paths.GetFiles(folder, findExtesions = \".labels\")) == 0):\n print(\"No files found.\")\n 
return\n\n for file in Paths.GetFiles(folder, findExtesions = \".labels\"):\n if(Paths.FileName(file).endswith(\"(1)\")):\n os.remove(file)\n else:\n name = Paths.FileName(file)\n newFileName = file.replace(name, Semantic3D.fileNames[name])\n os.rename(file, newFileName)\n\n if(os.path.getsize(newFileName) == 0):\n print(f\"{newFileName} if 0 bytes size\")\n \n if(len(Paths.GetFiles(folder, findExtesions = \".labels\")) != 15):\n print(\"Wrong number of files.\")\n else:\n print(\"Done renaming: \", folder)\n\nif __name__ == \"__main__\":\n from NearestNeighbors import NearestNeighborsLayer, SampleNearestNeighborsLayer\n from KDTree import KDTreeLayer, KDTreeSampleLayer\n modelPath = None\n\n # consts = NPM3D()\n # consts = Semantic3D()\n consts = Curbs()\n\n consts.noFeature = True\n # consts.Fusion = True\n # consts.Scale = True\n consts.Rotate = True\n # consts.Mirror = True\n # consts.Jitter = True\n # consts.FtrAugment = True\n\n testFiles = consts.TestFiles()\n trainFiles = consts.TrainFiles()\n\n modelPath = \"Sem3D(vox)(fusion)(FullAugment)_3_train(86.2)_val(79.5).h5\"\n # modelPath = \"Curbs(7&1)(noFeature)(Rotate)_21bdbe6aa82d4e259526ab46577e795a_25_train(75.1)_val(60.7).h5\"\n # modelPath = [\"Sem3D(vox)(RGB)(FullAugment)_55_train(85.7)_val(79.9)\", \"Sem3D(NOCOL)_50_train(87.4)_val(69.1)\"]\n # modelPath = [\"NPM3D(80&5)(RGB)(NoScale)_28_train(88.3)_val(73.2).h5\", \"NPM3D(80&5)(NOCOL)(FullAugment)_28_train(87.3)_val(71.5).h5\"]\n # modelPath = LatestModel(\"Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)\")\n # modelPath = LatestModel(consts.Name()) \n\n if(isinstance(modelPath,list)):\n consts.Fusion = True\n\n if(not consts.Fusion and not Const.IsWindowsMachine()):\n tf.config.optimizer.set_jit(True) #Gives more than 10% boost!!!\n print(\"XLA enabled.\")\n\n # modelPath = [\"Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)_9bbee708a7814063af9d85070452abd8_59_train(85.2)_val(72.8)\", \n # 
\"Sem3D(14&1)(noFeature)(Rotate)(Mirror)(Jitter)_ff2eb229084247d9a1c63caa519e9890_58_train(84.9)_val(75.5)\",\n # \"Sem3D(14&1)(noFeature)_dffc17f77e924894bbdbdad818ab6994_40_train(85.1)_val(68.8)\"]\n # EvaluateModels([modelPath], testFiles, consts)\n\n TrainModel(trainFiles, testFiles, consts, modelPath = modelPath)# , epochs = 8) #continue train\n # TrainModel(trainFiles, testFiles, consts) #new model\n\n # modelPath = HighestValMIOUModel(\"NPM3D(80&5)(fusion)(FullAugment)\")\n\n #NPM3D\n # GenerateData(modelPath, Paths.GetFiles(consts.Paths.rawTest), consts, consts.Paths.generatedTest)\n \n #Semantic3D \n # GenerateLargeData(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, consts, consts.Paths.generatedTest, Upscale=False)\n # UpscaleFilesAsync(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, Paths.Semantic3D.generatedTest)\n # RenameSemantic3DFiles(Paths.Semantic3D.generatedTest + Paths.FileName(modelPath))\n\n #Curbs\n EvaluateModels([modelPath], testFiles, consts)\n # GenerateData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+\"/generated/\")\n GenerateLargeData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+\"/generated/\")" ]
[ [ "numpy.save", "tensorflow.keras.optimizers.Adam", "tensorflow.summary.scalar", "tensorflow.reshape", "tensorflow.gather_nd", "tensorflow.matmul", "tensorflow.initializers.lecun_normal", "tensorflow.concat", "numpy.amax", "tensorflow.math.maximum", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.math.confusion_matrix", "numpy.logical_and", "numpy.concatenate", "numpy.abs", "numpy.cos", "tensorflow.config.optimizer.set_jit", "numpy.random.choice", "numpy.logical_not", "numpy.expand_dims", "tensorflow.keras.layers.BatchNormalization", "numpy.delete", "numpy.random.rand", "tensorflow.random_uniform_initializer", "numpy.where", "tensorflow.transpose", "tensorflow.constant", "numpy.unique", "tensorflow.nn.relu", "numpy.random.uniform", "numpy.load", "tensorflow.keras.layers.Dropout", "tensorflow.constant_initializer", "numpy.zeros", "tensorflow.ones_like", "tensorflow.expand_dims", "numpy.argmax", "tensorflow.cast", "numpy.max", "numpy.min", "tensorflow.zeros", "numpy.random.shuffle", "tensorflow.math.reduce_sum", "tensorflow.equal", "tensorflow.keras.models.load_model", "numpy.squeeze", "pandas.DataFrame", "tensorflow.keras.losses.CategoricalCrossentropy", "sklearn.neighbors.KDTree", "tensorflow.stop_gradient", "numpy.amin", "numpy.clip", "tensorflow.linalg.diag_part", "tensorflow.keras.losses.categorical_crossentropy", "numpy.array", "numpy.sin", "numpy.dot", "numpy.random.randint", "tensorflow.summary.create_file_writer", "tensorflow.keras.layers.Input" ] ]
neblar/numpynn
[ "b33c5f671c8e835b55ed775ababa358e14c987bc" ]
[ "test/test_conv1dt_6.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom ..src.Conv1DT_6 import Conv1DT as NumpyConv1DT\n\n\nclass Tester:\n conv1dt_numpy = NumpyConv1DT()\n\n def y_torch(self, x, weight, bias, stride, padding):\n x = torch.tensor(x)\n weight = torch.tensor(weight)\n bias = torch.tensor(bias)\n return F.conv_transpose1d(x, weight, bias, stride, padding).numpy()\n\n def y_numpy(self, x, weight, bias, stride, padding):\n return self.conv1dt_numpy(x, weight, bias, stride, padding)\n\n def __call__(self, inchan, outchan, kernel_len, stride, padding):\n in_len = np.random.randint(7, 64)\n x = np.random.randn(inchan, in_len)\n W = np.random.randn(inchan, outchan, kernel_len)\n B = np.random.randn(outchan)\n y1 = self.y_torch(x[None], W, B, stride, padding)[0]\n y2 = self.y_numpy(x, W, B, stride, padding)\n print(y1.shape, y2.shape)\n assert np.allclose(y1, y2)\n\n\ndef test():\n tester = Tester()\n for _ in range(32):\n tester(1, 1, 1, 1, 0)\n tester(1, 1, 2, 1, 0)\n tester(1, 1, 3, 1, 0)\n tester(4, 1, 3, 1, 0)\n tester(1, 2, 3, 1, 0)\n tester(1, 1, 4, 1, 0)\n tester(1, 2, 5, 1, 0)\n tester(1, 2, 7, 1, 0)\n\n tester(1, 1, 1, 2, 0)\n tester(1, 1, 2, 2, 0)\n tester(1, 1, 4, 3, 0)\n tester(4, 8, 4, 3, 0)\n tester(1, 1, 1, 1, 1)\n tester(1, 1, 3, 1, 1)\n tester(1, 1, 3, 2, 1)\n tester(1, 1, 3, 2, 2)\n\n tester(512, 256, 3, 1, 1)\n tester(256, 256, 3, 1, 1)\n tester(80, 80, 3, 1, 1)\n tester(512, 128, 13, 1, 6)\n tester(128, 128, 11, 1, 5)\n tester(128, 128, 9, 1, 4)\n tester(128, 128, 7, 1, 3)\n tester(128, 128, 5, 1, 2)\n tester(128, 128, 3, 1, 1)\n tester(128, 1, 1, 1, 0)\n\n tester(64, 32, 4, 2, 1)\n tester(128, 64, 4, 2, 1)\n tester(256, 128, 16, 8, 4)\n tester(512, 256, 16, 8, 4)\n" ]
[ [ "numpy.allclose", "numpy.random.randn", "torch.tensor", "torch.nn.functional.conv_transpose1d", "numpy.random.randint" ] ]
wangby511/Extreme-Dark-Video-Enhancement
[ "e0de50428d74a7cec2ee87b63e9fce9860dfd590" ]
[ "state-of-the-art/bmvc18/psnr_ssim_mabd.py" ]
[ "import os, glob, time\n\nimport tensorflow as tf\nimport numpy as np\nfrom skvideo.io import vread, vwrite\n\n\ndirectory = 'test_set_results/'\n\nTEST_RESULT_DIR = './result_MBLLVEN_raw_he2he/test/'\n\nMAX_VAL = 255\n\nsess = tf.Session()\nt_vid1 = tf.placeholder(tf.uint8, [None, None, None, None])\nt_vid2 = tf.placeholder(tf.uint8, [None, None, None, None])\nt_psnr = tf.reduce_mean(tf.image.psnr(t_vid1, t_vid2, MAX_VAL))\nt_ssim = tf.reduce_mean(tf.image.ssim(t_vid1, t_vid2, MAX_VAL))\n\n\ndef get_psnr_ssim(sess, vid1, vid2):\n assert vid1.shape[0] == vid2.shape[0]\n psnr = 0\n ssim = 0\n N = 20\n for i in range(vid1.shape[0] / N):\n psnr += sess.run(t_psnr, feed_dict={t_vid1: vid1[i * N:(i + 1) * N], t_vid2: vid2[i * N:(i + 1) * N]})\n ssim += sess.run(t_ssim, feed_dict={t_vid1: vid1[i * N:(i + 1) * N], t_vid2: vid2[i * N:(i + 1) * N]})\n return psnr / vid1.shape[0] * N, ssim / vid1.shape[0] * N\n\n\ndef brightness(vid):\n R, G, B = vid[:, :, :, 0], vid[:, :, :, 1], vid[:, :, :, 2]\n return (0.2126 * R + 0.7152 * G + 0.0722 * B) # refer to https://en.wikipedia.org/wiki/Relative_luminance\n\n\ndef get_mse_mabd(vid1, vid2):\n b_vid1 = brightness(vid1)\n b_vid2 = brightness(vid2)\n mabd1 = abs(np.diff(b_vid1)).mean(axis=(1,2))\n mabd2 = abs(np.diff(b_vid2)).mean(axis=(1,2))\n return ((mabd1 - mabd2) ** 2).mean()\n\n\noutput_files = glob.glob(TEST_RESULT_DIR + '*')\ngt_files = [os.path.basename(file)[:-4] for file in output_files]\n\nif 'psnr_ssim_mabd' in os.listdir('.'):\n os.rename('psnr_ssim_mabd', 'psnr_ssim_mabd' + '_' + str(time.localtime().tm_mon).zfill(2) + str(time.localtime().tm_mday).zfill(2) + '-' + str(time.localtime().tm_hour).zfill(2) + str(time.localtime().tm_min).zfill(2))\n\nwith open('psnr_ssim_mabd', 'w') as f:\n pass\n\nall_psnr = 0\nall_ssim = 0\nall_mabd = 0\n\nfor output_file in output_files:\n out_vid = vread(output_file)\n gt_file = os.path.basename(output_file)[:-4] + '.npy'\n gt_vid = np.load('../../0_data/gt_he/' + gt_file)\n t0 = 
time.time()\n psnr, ssim = get_psnr_ssim(sess, out_vid, gt_vid)\n t1 = time.time()\n mabd = get_mse_mabd(out_vid, gt_vid)\n t2 = time.time()\n print('Done.\\t{}s\\t{}s'.format(t1 - t0, t2 - t1))\n with open('psnr_ssim_mabd', 'a') as f:\n f.write(os.path.basename(output_file)[:-4] + ' ' + str(psnr) + ' ' + str(ssim) + ' ' + str(mabd) + '\\n')\n all_psnr += psnr\n all_ssim += ssim\n all_mabd += mabd\n\nwith open('psnr_ssim_mabd', 'a') as f:\n f.write('\\n' * 3 + 'overall_average ' + str(all_psnr / len(gt_files)) + ' ' + str(all_ssim / len(gt_files)) + ' ' + str(all_mabd / len(gt_files)) + '\\n')\n" ]
[ [ "numpy.load", "tensorflow.placeholder", "numpy.diff", "tensorflow.image.psnr", "tensorflow.image.ssim", "tensorflow.Session" ] ]
huyhoang17/DB_text_minimal
[ "0d1466889b21cb74a0571a0fb3856902739ea523" ]
[ "src/data_loaders.py" ]
[ "import os\nimport glob\nimport math\n\nimport hydra\nimport cv2\nimport numpy as np\nfrom shapely.geometry import Polygon\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport imgaug.augmenters as iaa\nimport pyclipper\n\nimport db_transforms\nfrom utils import dict_to_device, minmax_scaler_img\n\n\nclass BaseDatasetIter(Dataset):\n def __init__(self,\n train_dir,\n train_gt_dir,\n ignore_tags,\n is_training=True,\n image_size=640,\n min_text_size=8,\n shrink_ratio=0.4,\n thresh_min=0.3,\n thresh_max=0.7,\n augment=None,\n mean=[103.939, 116.779, 123.68],\n debug=False):\n\n self.train_dir = train_dir\n self.train_gt_dir = train_gt_dir\n self.ignore_tags = ignore_tags\n\n self.is_training = is_training\n self.image_size = image_size\n self.min_text_size = min_text_size\n self.shrink_ratio = shrink_ratio\n self.thresh_min = thresh_min\n self.thresh_max = thresh_max\n self.augment = augment\n if self.augment is None:\n self.augment = self._get_default_augment()\n\n self.mean = mean\n self.debug = debug\n\n # load metadata\n self.image_paths, self.gt_paths = self.load_metadata(\n train_dir, train_gt_dir)\n\n # load annotation\n self.all_anns = self.load_all_anns(self.gt_paths)\n assert len(self.image_paths) == len(self.all_anns)\n\n def _get_default_augment(self):\n augment_seq = iaa.Sequential([\n iaa.Fliplr(0.5),\n iaa.Affine(rotate=(-10, 10)),\n iaa.Resize((0.5, 3.0))\n ])\n return augment_seq\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n\n image_path = self.image_paths[index]\n anns = self.all_anns[index]\n\n if self.debug:\n print(image_path)\n print(len(anns))\n\n img = cv2.imread(image_path)[:, :, ::-1]\n if self.is_training and self.augment is not None:\n augment_seq = self.augment.to_deterministic()\n img, anns = db_transforms.transform(augment_seq, img, anns)\n img, anns = db_transforms.crop(img, anns)\n\n img, anns = db_transforms.resize(self.image_size, img, anns)\n\n anns = [ann for ann in 
anns if Polygon(ann['poly']).buffer(0).is_valid]\n gt = np.zeros((self.image_size, self.image_size),\n dtype=np.float32) # batch_gts\n mask = np.ones((self.image_size, self.image_size), dtype=np.float32)\n thresh_map = np.zeros((self.image_size, self.image_size),\n dtype=np.float32) # batch_thresh_maps\n # batch_thresh_masks\n thresh_mask = np.zeros((self.image_size, self.image_size),\n dtype=np.float32)\n\n if self.debug:\n print(type(anns), len(anns))\n\n ignore_tags = []\n for ann in anns:\n # i.e shape = (4, 2) / (6, 2) / ...\n poly = np.array(ann['poly'])\n height = max(poly[:, 1]) - min(poly[:, 1])\n width = max(poly[:, 0]) - min(poly[:, 0])\n polygon = Polygon(poly)\n\n # generate gt and mask\n if polygon.area < 1 or \\\n min(height, width) < self.min_text_size or \\\n ann['text'] in self.ignore_tags:\n ignore_tags.append(True)\n cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)\n continue\n else:\n # 6th equation\n distance = polygon.area * \\\n (1 - np.power(self.shrink_ratio, 2)) / polygon.length\n subject = [tuple(_l) for _l in ann['poly']]\n padding = pyclipper.PyclipperOffset()\n padding.AddPath(subject, pyclipper.JT_ROUND,\n pyclipper.ET_CLOSEDPOLYGON)\n shrinked = padding.Execute(-distance)\n\n if len(shrinked) == 0:\n ignore_tags.append(True)\n cv2.fillPoly(mask,\n poly.astype(np.int32)[np.newaxis, :, :], 0)\n continue\n else:\n shrinked = np.array(shrinked[0]).reshape(-1, 2)\n if shrinked.shape[0] > 2 and \\\n Polygon(shrinked).buffer(0).is_valid:\n ignore_tags.append(False)\n cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)\n else:\n ignore_tags.append(True)\n cv2.fillPoly(mask,\n poly.astype(np.int32)[np.newaxis, :, :],\n 0)\n continue\n\n # generate thresh map and thresh mask\n db_transforms.draw_thresh_map(ann['poly'],\n thresh_map,\n thresh_mask,\n shrink_ratio=self.shrink_ratio)\n\n thresh_map = thresh_map * \\\n (self.thresh_max - self.thresh_min) + self.thresh_min\n\n img = img.astype(np.float32)\n img[..., 0] -= self.mean[0]\n 
img[..., 1] -= self.mean[1]\n img[..., 2] -= self.mean[2]\n\n img = np.transpose(img, (2, 0, 1))\n\n data_return = {\n \"image_path\": image_path,\n \"img\": img,\n \"prob_map\": gt,\n \"supervision_mask\": mask,\n \"thresh_map\": thresh_map,\n \"text_area_map\": thresh_mask,\n }\n # for batch_size = 1\n if not self.is_training:\n data_return[\"anns\"] = [ann['poly'] for ann in anns]\n data_return[\"ignore_tags\"] = ignore_tags\n\n # return image_path, img, gt, mask, thresh_map, thresh_mask\n return data_return\n\n\nclass TotalTextDatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def load_metadata(self, img_dir, gt_dir):\n img_fps = sorted(glob.glob(os.path.join(img_dir, \"*\")))\n gt_fps = []\n for img_fp in img_fps:\n img_id = img_fp.split(\"/\")[-1].replace(\"img\", \"\").split(\".\")[0]\n gt_fn = \"gt_img{}.txt\".format(img_id)\n gt_fp = os.path.join(gt_dir, gt_fn)\n assert os.path.exists(img_fp)\n gt_fps.append(gt_fp)\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_paths):\n res = []\n for gt in gt_paths:\n lines = []\n reader = open(gt, 'r').readlines()\n for line in reader:\n item = {}\n parts = line.strip().split(',')\n label = parts[-1]\n line = [i.strip('\\ufeff').strip('\\xef\\xbb\\xbf') for i in parts]\n num_points = math.floor((len(line) - 1) / 2) * 2\n poly = np.array(list(map(float, line[:num_points]))).reshape(\n (-1, 2)).tolist()\n if len(poly) < 3:\n continue\n item['poly'] = poly\n item['text'] = label\n lines.append(item)\n res.append(lines)\n return res\n\n\nclass CTW1500DatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def load_metadata(self, img_dir, gt_dir):\n img_fps = sorted(glob.glob(os.path.join(img_dir, \"*\")))\n gt_fps = []\n for img_fp in img_fps:\n 
img_id = img_fp.split(\"/\")[-1][:-4]\n gt_fn = \"{}.txt\".format(img_id)\n gt_fp = os.path.join(gt_dir, gt_fn)\n assert os.path.exists(img_fp)\n gt_fps.append(gt_fp)\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_fps):\n \"\"\"\n Reference: https://github.com/whai362/PSENet/blob/master/dataset/ctw1500_loader.py\n \"\"\"\n res = []\n for gt_fp in gt_fps:\n lines = []\n with open(gt_fp, 'r') as f:\n for line in f:\n item = {}\n gt = line.strip().strip('\\ufeff').strip('\\xef\\xbb\\xbf')\n gt = list(map(int, gt.split(',')))\n\n x1 = np.int(gt[0])\n y1 = np.int(gt[1])\n bbox = [np.int(gt[i]) for i in range(4, 32)]\n bbox = np.asarray(bbox) + ([x1, y1] * 14)\n bbox = bbox.reshape(-1, 2).tolist()\n item['poly'] = bbox\n item['text'] = 'True'\n lines.append(item)\n res.append(lines)\n return res\n\n\nclass ICDAR2015DatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def load_metadata(self, img_dir, gt_dir):\n img_fps = glob.glob(os.path.join(img_dir, \"*\"))\n gt_fps = []\n for img_fp in img_fps:\n img_id = img_fp.split(\"/\")[-1].split(\".\")[0]\n gt_fn = \"gt_{}.txt\".format(img_id)\n gt_fp = os.path.join(gt_dir, gt_fn)\n assert os.path.exists(img_fp)\n gt_fps.append(gt_fp)\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_fps):\n res = []\n for gt_fp in gt_fps:\n lines = []\n with open(gt_fp, 'r') as f:\n for line in f:\n item = {}\n gt = line.strip().strip('\\ufeff').strip(\n '\\xef\\xbb\\xbf').split(\",\")\n label = \",\".join(gt[8:])\n poly = list(map(int, gt[:8]))\n poly = np.asarray(poly).reshape(-1, 2).tolist()\n item['poly'] = poly\n item['text'] = label\n lines.append(item)\n res.append(lines)\n return res\n\n\nclass MSRATD500DatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n 
super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def transform_four_points(self, points, center_point, theta):\n \"\"\"Reference: https://stackoverflow.com/questions/622140\n \"\"\"\n theta = -theta\n new_coords = []\n x_center, y_center = center_point\n\n for point in points:\n x, y = point\n x_new = x_center + (x - x_center) * np.cos(theta) + \\\n (y - y_center) * np.sin(theta)\n y_new = y_center - (x - x_center) * np.sin(theta) + \\\n (y - y_center) * np.cos(theta)\n x_new = int(x_new)\n y_new = int(y_new)\n new_coords.append((x_new, y_new))\n return new_coords\n\n def load_metadata(self, img_dir, gt_dir=None):\n # ignore gt_dir\n img_fps = sorted(glob.glob(os.path.join(img_dir, \"*.JPG\")))\n gt_fps = sorted(glob.glob(os.path.join(img_dir, \"*.gt\")))\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_fps):\n res = []\n for gt_fp in gt_fps:\n lines = []\n with open(gt_fp, 'r') as f:\n for line in f:\n item = {}\n line = list(map(float, line.strip().split()))\n index, dif, x_min, y_min, w, h, theta = line\n if int(dif) == 1: # difficult label\n continue\n\n c1 = (x_min, y_min)\n c2 = (x_min + w, y_min)\n c3 = (x_min + w, y_min + h)\n c4 = (x_min, y_min + h)\n center = (x_min + w / 2, y_min + h / 2)\n rot_box = self.transform_four_points([c1, c2, c3, c4],\n center, theta)\n rot_box = np.array(rot_box).tolist()\n\n item['poly'] = rot_box\n item['text'] = 'True'\n lines.append(item)\n res.append(lines)\n return res\n\n\[email protected](config_path=\"../config.yaml\", strict=False)\ndef run(cfg):\n dataset_name = cfg.dataset.name\n ignore_tags = cfg.data[dataset_name].ignore_tags\n train_dir = cfg.data[dataset_name].train_dir\n train_gt_dir = cfg.data[dataset_name].train_gt_dir\n\n if dataset_name == 'totaltext':\n TextDatasetIter = TotalTextDatasetIter\n elif dataset_name == 'ctw1500':\n TextDatasetIter = CTW1500DatasetIter\n elif dataset_name == 'icdar2015':\n TextDatasetIter = ICDAR2015DatasetIter\n 
elif dataset_name == 'msra_td500':\n TextDatasetIter = MSRATD500DatasetIter\n else:\n raise NotImplementedError(\"Pls provide valid dataset name!\")\n train_iter = TextDatasetIter(train_dir,\n train_gt_dir,\n ignore_tags,\n is_training=True,\n debug=False)\n train_loader = DataLoader(dataset=train_iter,\n batch_size=1,\n shuffle=True,\n num_workers=1)\n samples = next(iter(train_loader))\n samples = dict_to_device(samples, device='cpu')\n for k, v in samples.items():\n if isinstance(v, torch.Tensor):\n print(samples[k].device)\n import matplotlib.pyplot as plt\n plt.figure()\n plt.imshow(minmax_scaler_img(samples['img'][0].numpy().transpose(1, 2, 0)))\n plt.imshow(samples['prob_map'][0], cmap='jet', alpha=0.35)\n plt.imshow(samples['thresh_map'][0], cmap='jet', alpha=0.5)\n # plt.imshow(samples['text_area_map'][0], cmap='jet', alpha=0.5)\n # plt.imshow(samples['supervision_mask'][0], cmap='jet', alpha=0.5)\n plt.savefig(os.path.join(cfg.meta.root_dir, 'tmp/foo.jpg'),\n bbox_inches='tight')\n\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.ones", "numpy.transpose", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.cos", "numpy.asarray", "matplotlib.pyplot.imshow", "numpy.power", "numpy.array", "numpy.sin", "numpy.int" ] ]
ghmole/akshare
[ "eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6" ]
[ "akshare/option/option_commodity.py" ]
[ "# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/1/14 20:50\nDesc: 商品期权数据\n说明:\n(1) 价格:自2019年12月02日起,纤维板报价单位由元/张改为元/立方米\n(2) 价格:元/吨,鸡蛋为元/500千克,纤维板为元/立方米,胶合板为元/张\n(3) 成交量、持仓量:手(按双边计算)\n(4) 成交额:万元(按双边计算)\n(5) 涨跌=收盘价-前结算价\n(6) 涨跌1=今结算价-前结算价\n(7) 合约系列:具有相同月份标的期货合约的所有期权合约的统称\n(8) 隐含波动率:根据期权市场价格,利用期权定价模型计算的标的期货合约价格波动率\n\"\"\"\nimport datetime\nimport warnings\nfrom io import StringIO, BytesIO\n\nimport requests\nimport pandas as pd\n\nfrom akshare.option.cons import (\n get_calendar,\n convert_date,\n DCE_DAILY_OPTION_URL,\n SHFE_OPTION_URL,\n CZCE_DAILY_OPTION_URL_3,\n SHFE_HEADERS,\n)\n\n\ndef get_dce_option_daily(trade_date=\"20200817\", symbol=\"聚丙烯期权\"):\n \"\"\"\n 大连商品交易所-期权-日频行情数据\n :param trade_date: str format:\"20191017\"\n :param symbol: str \"玉米期权\" or \"豆粕期权\" or \"铁矿石期权\", or \"液化石油气期权\" or \"聚乙烯期权\" or \"聚氯乙烯期权\" or \"聚丙烯期权\"\n :return: pandas.DataFrame\n part-1:\n 商品名称 合约名称 开盘价 最高价 最低价 收盘价 前结算价 结算价 涨跌 涨跌1 \\\n 0 玉米 c2001-C-1680 168.5 168.5 168.5 168.5 168.0 167.5 0.5 -0.5\n 1 玉米 c2001-C-1700 0 0.0 0.0 148.0 148.0 148.0 0.0 0.0\n 2 玉米 c2001-C-1720 0 0.0 0.0 129.0 128.0 129.0 1.0 1.0\n 3 玉米 c2001-C-1740 115 115.0 115.0 115.0 108.0 111.0 7.0 3.0\n 4 玉米 c2001-C-1760 89 95.5 89.0 95.5 89.0 93.5 6.5 4.5\n .. ... ... ... ... ... ... ... ... ... ...\n 239 玉米 c2009-P-2040 0 0.0 0.0 91.0 88.5 91.0 2.5 2.5\n 240 玉米 c2009-P-2060 0 0.0 0.0 106.0 104.0 106.0 2.0 2.0\n 241 玉米 c2009-P-2080 0 0.0 0.0 121.5 120.5 121.5 1.0 1.0\n 242 玉米 c2009-P-2100 0 0.0 0.0 138.5 137.5 138.5 1.0 1.0\n 243 玉米 c2009-P-2120 0 0.0 0.0 155.5 155.5 155.5 0.0 0.0\n Delta 成交量 持仓量 持仓量变化 成交额 行权量\n 0 0.98 2 236 0 0.34 0.0\n 1 0.96 0 236 0 0 0.0\n 2 0.94 0 210 0 0 0.0\n 3 0.90 20 1,040 0 2.3 0.0\n 4 0.85 12 680 0 1.11 0.0\n .. ... .. ... ... ... 
...\n 239 -0.70 0 30 0 0 0.0\n 240 -0.75 0 50 0 0 0.0\n 241 -0.80 0 20 0 0 0.0\n 242 -0.84 0 10 0 0 0.0\n 243 -0.88 0 0 0 0 0.0\n\n part-2:\n 0 合约系列 隐含波动率(%)\n 1 c2001 12.95\n 2 c2003 8.74\n 3 c2005 8.75\n 4 c2007 7.7\n 5 c2009 6.85\n \"\"\"\n calendar = get_calendar()\n day = convert_date(trade_date) if trade_date is not None else datetime.date.today()\n if day.strftime(\"%Y%m%d\") not in calendar:\n warnings.warn(\"%s非交易日\" % day.strftime(\"%Y%m%d\"))\n return None\n url = DCE_DAILY_OPTION_URL\n payload = {\n \"dayQuotes.variety\": \"all\",\n \"dayQuotes.trade_type\": \"1\",\n \"year\": str(day.year),\n \"month\": str(day.month - 1),\n \"day\": str(day.day),\n \"exportFlag\": \"excel\",\n }\n res = requests.post(url, data=payload)\n table_df = pd.read_excel(BytesIO(res.content), header=0)\n another_df = table_df.iloc[\n table_df[table_df.iloc[:, 0].str.contains(\"合约\")].iloc[-1].name:, [0, 1]\n ]\n another_df.reset_index(inplace=True, drop=True)\n another_df.iloc[0] = another_df.iat[0, 0].split(\"\\t\")\n another_df.columns = another_df.iloc[0]\n another_df = another_df.iloc[1:, :]\n if symbol == \"豆粕期权\":\n return table_df[table_df[\"商品名称\"] == \"豆粕\"], another_df[another_df.iloc[:, 0].str.contains(\"m\")]\n elif symbol == \"玉米期权\":\n return table_df[table_df[\"商品名称\"] == \"玉米\"], another_df[another_df.iloc[:, 0].str.contains(\"c\")]\n elif symbol == \"铁矿石期权\":\n return table_df[table_df[\"商品名称\"] == \"铁矿石\"], another_df[another_df.iloc[:, 0].str.contains(\"i\")]\n elif symbol == \"液化石油气期权\":\n return table_df[table_df[\"商品名称\"] == \"液化石油气\"], another_df[another_df.iloc[:, 0].str.contains(\"pg\")]\n elif symbol == \"聚乙烯期权\":\n return table_df[table_df[\"商品名称\"] == \"聚乙烯\"], another_df[another_df.iloc[:, 0].str.contains(\"i\")]\n elif symbol == \"聚氯乙烯期权\":\n return table_df[table_df[\"商品名称\"] == \"聚氯乙烯\"], another_df[another_df.iloc[:, 0].str.contains(\"v\")]\n elif symbol == \"聚丙烯期权\":\n return table_df[table_df[\"商品名称\"] == \"聚丙烯\"], 
another_df[another_df.iloc[:, 0].str.contains(\"pp\")]\n\n\ndef get_czce_option_daily(trade_date=\"20191017\", symbol=\"白糖期权\"):\n \"\"\"\n 郑州商品交易所-期权-日频行情数据\n 说明:\n (1) 价格:元/吨\n (2) 成交量、空盘量:手\n (3) 成交额:万元\n (4) 涨跌一:今收盘-昨结算\n (5) 涨跌二:今结算-昨结算\n (6) 隐含波动率:将当日期权合约的结算价代入期权定价模型,反推出来的波动率数值\n :param trade_date: str \"20191017\"\n :param symbol: str \"白糖期权\", \"棉花期权\", \"甲醇期权\", \"PTA期权\", \"菜籽粕期权\"\n :return: pandas.DataFrame\n 郑商所每日期权交易数据\n 品种代码 昨结算 今开盘 最高价 最低价 今收盘 \\\n 0 CF001C10800 1,579.00 0.00 0.00 0.00 0.00\n 1 CF001C11000 1,392.00 0.00 0.00 0.00 0.00\n 2 CF001C11200 1,211.00 0.00 0.00 0.00 0.00\n 3 CF001C11400 1,038.00 1,396.00 1,396.00 1,396.00 1,396.00\n 4 CF001C11600 874.00 0.00 0.00 0.00 0.00\n .. ... ... ... ... ... ...\n 398 SR009P5900 576.00 0.00 0.00 0.00 0.00\n 399 SR009P6000 653.00 0.00 0.00 0.00 0.00\n 400 小计\n 401 SR合计\n 402 总计\n 今结算 涨跌1 涨跌2 成交量(手) 空盘量 增减量 \\\n 0 1,866.00 287.00 287.00 0 0 0\n 1 1,672.00 280.00 280.00 0 0 0\n 2 1,481.00 270.00 270.00 0 4 0\n 3 1,295.00 358.00 257.00 2 68 0\n 4 1,114.00 240.00 240.00 0 224 0\n .. ... ... ... ... ... ...\n 398 580.00 4.00 4.00 0 0 0\n 399 658.00 5.00 5.00 0 0 0\n 400 656 860 400\n 401 32,098 276,900 2252\n 402 110,664 474,154 14770\n 成交额(万元) DELTA 隐含波动率 行权量\n 0 0.00 0.9765 22.29 0\n 1 0.00 0.9621 21.84 0\n 2 0.00 0.9423 21.38 0\n 3 1.40 0.9155 20.91 0\n 4 0.00 0.8800 20.45 0\n .. ... ... ... 
...\n 398 0.00 -0.6639 16.24 0\n 399 0.00 -0.7007 16.58 0\n 400 97.28 0\n 401 2138.41 0\n 402 8769.52 2\n \"\"\"\n calendar = get_calendar()\n day = convert_date(trade_date) if trade_date is not None else datetime.date.today()\n if day.strftime(\"%Y%m%d\") not in calendar:\n warnings.warn(\"{}非交易日\".format(day.strftime(\"%Y%m%d\")))\n return None\n if day > datetime.date(2010, 8, 24):\n url = CZCE_DAILY_OPTION_URL_3.format(day.strftime(\"%Y\"), day.strftime(\"%Y%m%d\"))\n try:\n r = requests.get(url)\n f = StringIO(r.text)\n table_df = pd.read_table(f, encoding=\"utf-8\", skiprows=1, sep=\"|\")\n if symbol == \"白糖期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"SR\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"PTA期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"TA\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"甲醇期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"MA\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"菜籽粕期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"RM\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"动力煤期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"ZC\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n else:\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"CF\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n except:\n return None\n\n\ndef get_shfe_option_daily(trade_date=\"20200827\", symbol=\"铝期权\"):\n \"\"\"\n 上海期货交易所-期权-日频行情数据\n :param trade_date: str \"20191017\"\n :param symbol: str \"铜期权\" or \"天胶期权\" or \"黄金期权\" or \"铝期权\" or \"锌期权\"\n :return: tuple(pandas.DataFrame)\n \"\"\"\n calendar = get_calendar()\n day = convert_date(trade_date) if trade_date is not None else datetime.date.today()\n if 
day.strftime(\"%Y%m%d\") not in calendar:\n warnings.warn(\"%s非交易日\" % day.strftime(\"%Y%m%d\"))\n return None\n if day > datetime.date(2010, 8, 24):\n url = SHFE_OPTION_URL.format(day.strftime(\"%Y%m%d\"))\n try:\n r = requests.get(url, headers=SHFE_HEADERS)\n json_data = r.json()\n table_df = pd.DataFrame(\n [\n row\n for row in json_data[\"o_curinstrument\"]\n if row[\"INSTRUMENTID\"] not in [\"小计\", \"合计\"]\n and row[\"INSTRUMENTID\"] != \"\"\n ]\n )\n contract_df = table_df[table_df[\"PRODUCTNAME\"].str.strip() == symbol]\n product_df = pd.DataFrame(json_data[\"o_curproduct\"])\n product_df = product_df[product_df[\"PRODUCTNAME\"].str.strip() == symbol]\n volatility_df = pd.DataFrame(json_data[\"o_cursigma\"])\n volatility_df = volatility_df[\n volatility_df[\"PRODUCTNAME\"].str.strip() == symbol\n ]\n contract_df.columns = [\n \"_\",\n \"_\",\n \"_\",\n \"合约代码\",\n \"前结算价\",\n \"开盘价\",\n \"最高价\",\n \"最低价\",\n \"收盘价\",\n \"结算价\",\n \"涨跌1\",\n \"涨跌2\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"_\",\n \"行权量\",\n \"成交额\",\n \"德尔塔\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ]\n contract_df = contract_df[[\n \"合约代码\",\n \"开盘价\",\n \"最高价\",\n \"最低价\",\n \"收盘价\",\n \"前结算价\",\n \"结算价\",\n \"涨跌1\",\n \"涨跌2\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"成交额\",\n \"德尔塔\",\n \"行权量\",\n ]]\n\n volatility_df.columns = [\n \"_\",\n \"_\",\n \"_\",\n \"合约系列\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"行权量\",\n \"成交额\",\n \"隐含波动率\",\n \"_\",\n ]\n\n volatility_df = volatility_df[[\n \"合约系列\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"成交额\",\n \"行权量\",\n \"隐含波动率\",\n ]]\n return contract_df, volatility_df\n except:\n return None\n\n\nif __name__ == \"__main__\":\n get_czce_option_daily_df = get_czce_option_daily(trade_date=\"20200817\", symbol=\"动力煤期权\")\n print(get_czce_option_daily_df)\n get_dce_option_daily_one, get_dce_option_daily_two = get_dce_option_daily(trade_date=\"20210113\", symbol=\"玉米期权\")\n print(get_dce_option_daily_one)\n print(get_dce_option_daily_two)\n 
get_shfe_option_daily_one, get_shfe_option_daily_two = get_shfe_option_daily(trade_date=\"20210312\", symbol=\"天胶期权\")\n print(get_shfe_option_daily_one)\n print(get_shfe_option_daily_two)\n" ]
[ [ "pandas.read_table", "pandas.DataFrame" ] ]
xinjianlv/pycorrector
[ "697fc09032d129b2777cf686bb05663f2fc3c04f" ]
[ "pycorrector/transformers/models/bert_generation/modeling_bert_generation.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model specific for generation. \"\"\"\n\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions\nfrom ...modeling_utils import PreTrainedModel\nfrom ..bert.modeling_bert import BertEncoder\nfrom .configuration_bert_generation import BertGenerationConfig\n\nfrom pycorrector.utils.logger import logger\n\n\n_CONFIG_FOR_DOC = \"BertGenerationConfig\"\n_TOKENIZER_FOR_DOC = \"BertGenerationTokenizer\"\n\n\ndef load_tf_weights_in_bert_generation(\n model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False\n):\n try:\n import numpy as np\n import tensorflow.compat.v1 as tf\n\n import tensorflow_hub as hub\n import tensorflow_text # noqa: F401\n\n tf.disable_eager_execution()\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_model = hub.Module(tf_hub_path)\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n init.run()\n all_variables = tf_model.variable_map\n keep_track_variables = all_variables.copy()\n for key in list(all_variables.keys()):\n if \"global\" in key:\n logger.info(f\"Skipping {key}...\")\n continue\n if not is_encoder:\n model_pointer = getattr(model, model_class)\n else:\n model_pointer = model\n is_embedding = False\n logger.info(f\"Trying to match {key}...\")\n # remove start_string = \"module/bert/\"\n sub_layers = key.split(\"/\")[2:]\n if is_encoder_named_decoder and sub_layers[0] == \"encoder\":\n logger.info(f\"Skipping encoder layer {key} for decoder\")\n continue\n if is_encoder and sub_layers[0] == \"decoder\":\n logger.info(f\"Skipping decoder layer {key} for encoder\")\n continue\n for i, sub_layer in enumerate(sub_layers):\n if sub_layer == \"embeddings\":\n is_embedding = True\n elif sub_layer == \"LayerNorm\":\n is_embedding = False\n if \"layer\" in sub_layer:\n model_pointer = model_pointer.layer[int(sub_layer.split(\"_\")[-1])]\n elif sub_layer in [\"kernel\", \"gamma\"]:\n model_pointer = model_pointer.weight\n elif sub_layer == \"beta\":\n model_pointer = model_pointer.bias\n elif sub_layer == \"encdec\":\n model_pointer = model_pointer.crossattention.self\n elif sub_layer == \"encdec_output\":\n model_pointer = model_pointer.crossattention.output\n elif is_encoder_named_decoder and sub_layer == \"decoder\":\n model_pointer = model_pointer.encoder\n else:\n if sub_layer == \"attention\" and \"encdec\" in sub_layers[i + 1]:\n continue\n try:\n model_pointer = getattr(model_pointer, sub_layer)\n except AttributeError:\n logger.info(f\"Skipping to initialize {key} at {sub_layer}...\")\n raise AttributeError\n\n array = np.asarray(sess.run(all_variables[key]))\n if not is_embedding:\n logger.info(\"Transposing numpy weight of 
shape {} for {}\".format(array.shape, key))\n array = np.transpose(array)\n else:\n model_pointer = model_pointer.weight\n\n try:\n assert (\n model_pointer.shape == array.shape\n ), f\"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (model_pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {key}\")\n\n model_pointer.data = torch.from_numpy(array.astype(np.float32))\n keep_track_variables.pop(key, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(keep_track_variables.keys())))\n return model\n\n\nclass BertGenerationEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word and position embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = 
self.position_embeddings(position_ids)\n\n embeddings = inputs_embeds + position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertGenerationPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n model_files.\n \"\"\"\n\n config_class = BertGenerationConfig\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nBERT_GENERATION_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertGenerationConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nBERT_GENERATION_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertGenerationTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.\",\n BERT_GENERATION_START_DOCSTRING,\n)\nclass BertGenerationEncoder(BertGenerationPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n This model should be used when leveraging Bert or Roberta checkpoints for the\n :class:`~transformers.EncoderDecoderModel` class as described in `Leveraging Pre-trained Checkpoints for Sequence\n Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. 
To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertGenerationEmbeddings(config)\n self.encoder = BertEncoder(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/bert_for_seq_generation_L-24_bbc_encoder\",\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for\n tokens that are NOT MASKED, ``0`` for MASKED tokens.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif 
inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask = None\n if not use_cache:\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n 
inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[1:]\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=sequence_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\nclass BertGenerationOnlyLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n logits = self.decoder(hidden_states)\n return logits\n\n\n@add_start_docstrings(\n \"\"\"BertGeneration Model with a `language modeling` head on top for CLM fine-tuning. 
\"\"\",\n BERT_GENERATION_START_DOCSTRING,\n)\nclass BertGenerationDecoder(BertGenerationPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warn(\"If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`\")\n\n self.bert = BertGenerationEncoder(config)\n self.lm_head = BertGenerationOnlyLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig\n >>> import torch\n\n >>> tokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')\n >>> config = BertGenerationConfig.from_pretrained(\"google/bert_for_seq_generation_L-24_bbc_encoder\")\n >>> config.is_decoder = True\n >>> model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_token_type_ids=False, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n 
head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n" ]
[ [ "tensorflow.compat.v1.disable_eager_execution", "torch.ones", "tensorflow.compat.v1.Session", "torch.nn.Linear", "numpy.transpose", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "torch.nn.LayerNorm", "tensorflow.compat.v1.global_variables_initializer", "torch.arange", "torch.zeros", "torch.nn.Dropout" ] ]
Ankur3107/zenml
[ "5dc05a833b50ac9cc49e851b9d91255da6016dfd" ]
[ "examples/functional_api/chapter_4.py" ]
[ "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.base import ClassifierMixin\nfrom sklearn.linear_model import LogisticRegression\n\nfrom zenml.integrations.constants import SKLEARN, TENSORFLOW\nfrom zenml.pipelines import pipeline\nfrom zenml.repository import Repository\nfrom zenml.steps import BaseStepConfig, Output, step\n\n\nclass TrainerConfig(BaseStepConfig):\n \"\"\"Trainer params\"\"\"\n\n epochs: int = 1\n gamma: float = 0.7\n lr: float = 0.001\n\n\n@step\ndef importer_mnist() -> Output(\n X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray\n):\n \"\"\"Download the MNIST data store it as an artifact\"\"\"\n (X_train, y_train), (\n X_test,\n y_test,\n ) = tf.keras.datasets.mnist.load_data()\n return X_train, y_train, X_test, y_test\n\n\n@step\ndef normalize_mnist(\n X_train: np.ndarray, X_test: np.ndarray\n) -> Output(X_train_normed=np.ndarray, X_test_normed=np.ndarray):\n \"\"\"Normalize the values for all the images so they are between 0 and 1\"\"\"\n X_train_normed = X_train / 255.0\n X_test_normed = X_test / 255.0\n return X_train_normed, X_test_normed\n\n\n@step\ndef tf_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> tf.keras.Model:\n \"\"\"Train a neural net from scratch to recognize MNIST digits return our\n model or the learner\"\"\"\n model = tf.keras.Sequential(\n [\n 
tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(10, activation=\"relu\"),\n tf.keras.layers.Dense(10),\n ]\n )\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(0.001),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n\n model.fit(\n X_train,\n y_train,\n epochs=config.epochs,\n )\n\n # write model\n return model\n\n\n@step\ndef tf_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: tf.keras.Model,\n) -> float:\n \"\"\"Calculate the loss for the model for each epoch in a graph\"\"\"\n\n _, test_acc = model.evaluate(X_test, y_test, verbose=2)\n return test_acc\n\n\n@step\ndef sklearn_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> ClassifierMixin:\n \"\"\"Train SVC from sklearn.\"\"\"\n clf = LogisticRegression(penalty=\"l1\", solver=\"saga\", tol=0.1)\n clf.fit(X_train.reshape((X_train.shape[0], -1)), y_train)\n return clf\n\n\n@step\ndef sklearn_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: ClassifierMixin,\n) -> float:\n \"\"\"Calculate accuracy score with classifier.\"\"\"\n\n test_acc = model.score(X_test.reshape((X_test.shape[0], -1)), y_test)\n return test_acc\n\n\n@pipeline(required_integrations=[SKLEARN, TENSORFLOW])\ndef mnist_pipeline(\n importer,\n normalizer,\n trainer,\n evaluator,\n):\n # Link all the steps artifacts together\n X_train, y_train, X_test, y_test = importer()\n X_trained_normed, X_test_normed = normalizer(X_train=X_train, X_test=X_test)\n model = trainer(X_train=X_trained_normed, y_train=y_train)\n evaluator(X_test=X_test_normed, y_test=y_test, model=model)\n\n\n# Run the pipeline\n# Initialize a pipeline run\ntf_p = mnist_pipeline(\n importer=importer_mnist(),\n normalizer=normalize_mnist(),\n trainer=tf_trainer(config=TrainerConfig(epochs=1)),\n evaluator=tf_evaluator(),\n)\n\n# Run the pipeline\ntf_p.run()\n\n\n# Initialize a new pipeline run\nscikit_p = mnist_pipeline(\n 
importer=importer_mnist(),\n normalizer=normalize_mnist(),\n trainer=sklearn_trainer(config=TrainerConfig()),\n evaluator=sklearn_evaluator(),\n)\n\n# Run the new pipeline\nscikit_p.run()\n\n# Post execution flow\nrepo = Repository()\np = repo.get_pipeline(pipeline_name=\"mnist_pipeline\")\nprint(f\"Pipeline `mnist_pipeline` has {len(p.runs)} run(s)\")\nfor r in p.runs[0:2]:\n eval_step = r.get_step(\"evaluator\")\n print(\n f\"For {eval_step.entrypoint_name}, the accuracy is: \"\n f\"{eval_step.output.read():.2f}\"\n )\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.keras.layers.Dense", "sklearn.linear_model.LogisticRegression" ] ]
ersilia-os/osm-series4-candidates-2
[ "a0b7f55d79c65182dcc4c102791d2ababbfb176e" ]
[ "scripts/4_similarity.py" ]
[ "from __init__ import OUTPUT\n\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.DataStructs import BulkTanimotoSimilarity\nimport os, sys\n\nprint(\"SIMILARITY SCORES\")\n\ndef mols_to_fingerprints(molecules, radius=3, useCounts=False, useFeatures=True):\n fingerprints = [AllChem.GetMorganFingerprint(\n mol,\n radius,\n useCounts=useCounts,\n useFeatures=useFeatures\n ) for mol in tqdm(molecules)]\n return fingerprints\n\nraw_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"data\", \"raw\")\nsys.path.append(raw_folder)\n\n#get series4 molecules for tanimoto similarity\ns4 = pd.read_csv(os.path.join(raw_folder, \"series4_processed.csv\"))\ns4_smiles = s4[\"smiles\"].tolist()\ns4_mols = [Chem.MolFromSmiles(smi) for smi in s4_smiles]\nref_fps=mols_to_fingerprints(s4_mols)\n\n\ndf = pd.read_csv(os.path.join(OUTPUT, \"data_3.csv\"))\nsmiles=df[\"Smiles\"].tolist()\nmols = [Chem.MolFromSmiles(smi) for smi in tqdm(smiles)]\nfps=mols_to_fingerprints(mols)\nsims = []\nfor fp in tqdm(fps):\n sim=BulkTanimotoSimilarity(fp, ref_fps)\n maxsim = np.max(sim)\n sims += [maxsim]\n\ndf[\"Similarity\"]=sims\ndf=df[df[\"Similarity\"] <= 0.70]\n\ndf.to_csv(os.path.join(OUTPUT, \"data_4.csv\"), index = False)\n" ]
[ [ "numpy.max" ] ]
OpenSourceEconomics/grmpy
[ "3ff5ec9cd108582c23cb61e6b8d87f4db6ceaee1" ]
[ "grmpy/read/read_auxiliary.py" ]
[ "\"\"\"This module provides auxiliary functions for the import process of the init file.\"\"\"\nimport numpy as np\n\n\ndef create_attr_dict_est(init_dict, semipar=False, include_constant=False):\n \"\"\"This function processes the imported initialization file so that it fulfills the\n requirements for the subsequent estimation process.\n \"\"\"\n init_dict[\"AUX\"] = {\"init_values\"}\n init_values = []\n\n if semipar is True:\n if include_constant is True:\n init_dict = add_constant(init_dict, semipar)\n else:\n pass\n\n init_dict = read_keys_semipar(init_dict, init_values)\n\n # semipar is False\n else:\n if include_constant is True:\n init_dict = add_constant(init_dict, semipar)\n else:\n pass\n\n init_dict = read_keys_par(init_dict, init_values)\n\n init_dict = provide_auxiliary_information(init_dict, init_values)\n\n return init_dict\n\n\ndef create_attr_dict_sim(init_dict):\n \"\"\"This function processes the imported initialization file so that it fulfills the\n requirements for the following simulation and estimation process.\n \"\"\"\n init_dict[\"AUX\"] = {\"init_values\"}\n init_values = []\n\n init_dict = read_keys_par(init_dict, init_values)\n init_dict = provide_auxiliary_information(init_dict, init_values)\n\n return init_dict\n\n\ndef add_constant(init_dict, semipar=False):\n \"\"\"The function checks if the user has provided a constant\n for the relevant subsections:\n [\"TREATED\", \"UNTREATED\", \"CHOICE\"] for the parametric, and\n [\"CHOICE\"] for the semiparamteric estimation, respectively.\n \"\"\"\n\n if semipar is True:\n if \"const\" not in init_dict[\"CHOICE\"][\"order\"]:\n init_dict[\"CHOICE\"][\"order\"].insert(0, \"const\")\n init_dict[\"CHOICE\"][\"params\"] = np.array([1.0])\n else:\n pass\n\n # semipar is False\n else:\n for key in [\"TREATED\", \"UNTREATED\", \"CHOICE\"]:\n if \"const\" not in init_dict[key][\"order\"]:\n init_dict[key][\"order\"].insert(0, \"const\")\n init_dict[key][\"params\"] = np.array([1.0])\n else:\n 
pass\n\n return init_dict\n\n\ndef read_keys_par(init_dict, init_values):\n \"\"\"This function reads the information provided by the\n [\"TREATED\", \"UNTREATED\", \"CHOICE\", \"DIST\"] keys for\n the simulation and parametric estimation.\n \"\"\"\n for key in [\"TREATED\", \"UNTREATED\", \"CHOICE\", \"DIST\"]:\n if \"params\" in init_dict[key].keys():\n init_dict[key][\"params\"] = np.array(init_dict[key][\"params\"])\n init_values += list(init_dict[key][\"params\"])\n else:\n init_values += [0.0] * len(init_dict[key][\"order\"])\n\n if np.all(init_dict[\"DIST\"][\"params\"] == 0):\n init_dict[\"DETERMINISTIC\"] = True\n else:\n init_dict[\"DETERMINISTIC\"] = False\n\n return init_dict\n\n\ndef read_keys_semipar(init_dict, init_values):\n \"\"\"This function reads the information provided by the\n [\"TREATED\", \"UNTREATED\", \"CHOICE\"] keys for\n semiparametric estimation.\n \"\"\"\n for key in [\"TREATED\", \"UNTREATED\", \"CHOICE\"]:\n if \"params\" in init_dict[key].keys():\n init_dict[key][\"params\"] = np.array(init_dict[key][\"params\"])\n init_values += list(init_dict[key][\"params\"])\n else:\n init_values += [0.0] * len(init_dict[key][\"order\"])\n\n return init_dict\n\n\ndef provide_auxiliary_information(init_dict, init_values):\n \"\"\"This function generates auxiliary information\n given the parameters in the initialization dictionary\n \"\"\"\n num_covars = len(\n set(\n init_dict[\"TREATED\"][\"order\"]\n + init_dict[\"UNTREATED\"][\"order\"]\n + init_dict[\"CHOICE\"][\"order\"]\n )\n )\n\n covar_label = []\n for section in [\"TREATED\", \"UNTREATED\", \"CHOICE\"]:\n covar_label += [i for i in init_dict[section][\"order\"] if i not in covar_label]\n\n # Generate the AUX section that include some additional auxiliary information\n init_dict[\"AUX\"] = {\n \"init_values\": np.array(init_values),\n \"num_covars_choice\": len(init_dict[\"CHOICE\"][\"order\"]),\n \"num_covars_treated\": len(init_dict[\"TREATED\"][\"order\"]),\n 
\"num_covars_untreated\": len(init_dict[\"UNTREATED\"][\"order\"]),\n \"num_paras\": len(init_values) + 1,\n \"num_covars\": num_covars,\n \"labels\": covar_label,\n }\n\n return init_dict\n" ]
[ [ "numpy.array", "numpy.all" ] ]
mwizasimbeye11/hub
[ "d743b0f14ee538e8bb50006895779b048d0f4db1" ]
[ "tensorflow_hub/feature_column_test.py" ]
[ "# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_hub.feature_column.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\ntry:\n import mock as mock\nexcept ImportError:\n import unittest.mock as mock\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow_hub import tf_v1\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.ops.lookup_ops import HashTable\nfrom tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer\n# pylint: enable=g-direct-tensorflow-import\n\n\ndef text_module_fn():\n embeddings = [\n (\"\", [0, 0, 0, 0]), # OOV items are mapped to this embedding.\n (\"hello world\", [1, 2, 3, 4]),\n (\"pair-programming\", [5, 5, 5, 5]),\n ]\n keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)\n indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)\n tbl_init = KeyValueTensorInitializer(keys, indices)\n table = HashTable(tbl_init, 0)\n\n weights_initializer = tf.cast(\n tf.constant(list([item[1] for item in 
embeddings])), tf.float32)\n\n weights = tf_v1.get_variable(\n \"weights\", dtype=tf.float32, initializer=weights_initializer)\n\n text_tensor = tf_v1.placeholder(dtype=tf.string, name=\"text\", shape=[None])\n indices_tensor = table.lookup(text_tensor)\n embedding_tensor = tf.gather(weights, indices_tensor)\n hub.add_signature(inputs=text_tensor, outputs=embedding_tensor)\n\n\ndef invalid_text_module_fn():\n text = tf_v1.placeholder(tf.string, shape=[10])\n hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))\n\n\ndef export_module_spec(spec, export_path):\n \"\"\"Export module with random initialization.\"\"\"\n with tf_v1.Graph().as_default():\n m = hub.Module(spec)\n with tf_v1.Session() as session:\n session.run(tf_v1.initializers.global_variables())\n m.export(export_path, session)\n\n\nclass CommonColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n @mock.patch.object(feature_column_v2._StateManagerImpl, \"add_resource\")\n def testFeatureColumnsWithResources(self, mock_add_resource):\n feature_column = hub.text_embedding_column(\"text_a\", self.spec)\n if not isinstance(feature_column, feature_column_v2.FeatureColumn):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))\n\n @mock.patch.object(feature_column_v2._StateManagerImpl, \"add_resource\")\n def testFeatureColumnsWithNoResources(self, mock_add_resource):\n mock_add_resource.side_effect = NotImplementedError\n feature_column = hub.text_embedding_column(\"text_a\", self.spec)\n self.assertFalse(feature_column_v2.is_feature_column_v2([feature_column]))\n\n\nclass TextEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n def testVariableShape(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n 
self.assertEqual(text_column._variable_shape, [4])\n\n def testParents(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n self.assertEqual([\"text\"], text_column.parents)\n\n def testMakeParseExampleSpec(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])\n self.assertEqual(parsing_spec,\n {\"text\": tf_v1.FixedLenFeature([1], dtype=tf.string)})\n\n def testInputLayer(self):\n features = {\n \"text_a\": [\"hello world\", \"pair-programming\"],\n \"text_b\": [\"hello world\", \"oov token\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text_a\", self.spec, trainable=False),\n hub.text_embedding_column(\"text_b\", self.spec, trainable=False),\n ]\n with tf.Graph().as_default():\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllEqual(\n output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])\n\n def testDenseFeatures(self):\n features = {\n \"text_a\": [\"hello world\", \"pair-programming\"],\n \"text_b\": [\"hello world\", \"oov token\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text_a\", self.spec, trainable=False),\n hub.text_embedding_column(\"text_b\", self.spec, trainable=False),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. 
This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out = feature_layer(features)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(feature_layer_out)\n self.assertAllEqual(\n output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])\n\n def testDenseFeatures_shareAcrossApplication(self):\n features = {\n \"text\": [\"hello world\", \"pair-programming\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text\", self.spec, trainable=True),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out_1 = feature_layer(features)\n feature_layer_out_2 = feature_layer(features)\n\n # We define loss only on the first layer. 
Since layers should have shared\n # weights, we expect the second layer will change too.\n loss = feature_layer_out_1 - tf.constant(0.005)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.7)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n before_update_1 = sess.run(feature_layer_out_1)\n sess.run(train_op)\n after_update_1 = sess.run(feature_layer_out_1)\n after_update_2 = sess.run(feature_layer_out_2)\n\n self.assertAllEqual(before_update_1, [[1, 2, 3, 4],\n [5, 5, 5, 5]])\n self.assertAllEqual(after_update_1, after_update_2)\n\n def testWorksWithCannedEstimator(self):\n comment_embedding_column = hub.text_embedding_column(\n \"comment\", self.spec, trainable=False)\n upvotes = tf_v1.feature_column.numeric_column(\"upvotes\")\n\n feature_columns = [comment_embedding_column, upvotes]\n estimator = tf_v1.estimator.DNNClassifier(\n hidden_units=[10],\n feature_columns=feature_columns,\n model_dir=self.get_temp_dir())\n\n # This only tests that estimator apis are working with the feature\n # column without throwing exceptions.\n features = {\n \"comment\": np.array([\n [\"the quick brown fox\"],\n [\"spam spam spam\"],\n ]),\n \"upvotes\": np.array([\n [20],\n [1],\n ]),\n }\n labels = np.array([[1], [0]])\n numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn\n input_fn = numpy_input_fn(features, labels, shuffle=True)\n estimator.train(input_fn, max_steps=1)\n estimator.evaluate(input_fn, steps=1)\n estimator.predict(input_fn)\n\n def testTrainableEmbeddingColumn(self):\n feature_columns = [\n hub.text_embedding_column(\"text\", self.spec, trainable=True),\n ]\n\n with tf.Graph().as_default():\n features = {\n \"text\": [\"hello world\", \"pair-programming\"],\n }\n target = [[1, 1, 1, 1], [4, 3, 2, 1]]\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n\n loss = tf.cast(\n tf_v1.losses.mean_squared_error(input_layer, target), tf.float64)\n optimizer = 
tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])\n for _ in range(10):\n sess.run(train_op)\n self.assertAllClose(sess.run(input_layer), target, atol=0.5)\n\n def testInvalidTextModule(self):\n spec = hub.create_module_spec(invalid_text_module_fn)\n with self.assertRaisesRegexp(ValueError, \"only one input\"):\n hub.text_embedding_column(\"coment\", spec, trainable=False)\n\n def testConfig(self):\n module_path = os.path.join(self.get_temp_dir(), \"module\")\n export_module_spec(self.spec, module_path)\n text_column = hub.text_embedding_column(\"text\", module_path)\n config = text_column.get_config()\n cloned_text_column = hub.feature_column._TextEmbeddingColumn.from_config(\n config)\n self.assertEqual(cloned_text_column.module_spec_path,\n text_column.module_spec_path)\n\n with self.assertRaisesRegexp(NotImplementedError, \"Can only generate\"):\n text_column = hub.text_embedding_column(\"text\", self.spec)\n config = text_column.get_config()\n\n\ndef create_image_module_fn(randomly_initialized=False):\n def image_module_fn():\n \"\"\"Maps 1x2 images to sums of each color channel.\"\"\"\n images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3])\n if randomly_initialized:\n initializer = tf_v1.random_uniform_initializer(\n minval=-1, maxval=1, dtype=tf.float32)\n else:\n initializer = tf_v1.constant_initializer(1.0, dtype=tf.float32)\n weight = tf_v1.get_variable(\n name=\"weight\", shape=[1], initializer=initializer)\n sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight\n hub.add_signature(inputs={\"images\": images}, outputs=sum_channels)\n return image_module_fn\n\n\nclass ImageEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(create_image_module_fn())\n self.randomly_initialized_spec = hub.create_module_spec(\n 
create_image_module_fn(randomly_initialized=True))\n\n def testExpectedImageSize(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n # The usage comment recommends this code pattern, so we test it here.\n self.assertSequenceEqual(\n hub.get_expected_image_size(image_column.module_spec), [1, 2])\n\n def testVariableShape(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n self.assertEqual(image_column.variable_shape, [3])\n\n def testParents(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n self.assertEqual([\"image\"], image_column.parents)\n\n def testMakeParseExampleSpec(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([image_column])\n self.assertEqual(\n parsing_spec,\n {\"image\": tf_v1.FixedLenFeature([1, 2, 3], dtype=tf.float32)})\n\n def testInputLayer(self):\n features = {\n \"image_a\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n \"image_b\": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],\n [[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image_a\", self.spec),\n hub.image_embedding_column(\"image_b\", self.spec),\n ]\n with tf.Graph().as_default():\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllClose(\n output,\n [[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])\n\n def testDenseFeatures(self):\n features = {\n \"image_a\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n \"image_b\": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],\n [[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image_a\", self.spec),\n hub.image_embedding_column(\"image_b\", self.spec),\n ]\n if not 
feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out = feature_layer(features)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(feature_layer_out)\n self.assertAllClose(\n output,\n [[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])\n\n def testDenseFeatures_shareAcrossApplication(self):\n features = {\n \"image\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image\", self.randomly_initialized_spec),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. 
This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out_1 = feature_layer(features)\n feature_layer_out_2 = feature_layer(features)\n\n with tf_v1.train.MonitoredSession() as sess:\n output_1 = sess.run(feature_layer_out_1)\n output_2 = sess.run(feature_layer_out_2)\n\n self.assertAllClose(output_1, output_2)\n\n def testWorksWithCannedEstimator(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n other_column = tf_v1.feature_column.numeric_column(\"number\")\n\n feature_columns = [image_column, other_column]\n estimator = tf_v1.estimator.DNNClassifier(\n hidden_units=[10],\n feature_columns=feature_columns,\n model_dir=self.get_temp_dir())\n\n # This only tests that estimator apis are working with the feature\n # column without throwing exceptions.\n features = {\n \"image\":\n np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n dtype=np.float32),\n \"number\":\n np.array([[20], [1]]),\n }\n labels = np.array([[1], [0]])\n numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn\n input_fn = numpy_input_fn(features, labels, shuffle=True)\n estimator.train(input_fn, max_steps=1)\n estimator.evaluate(input_fn, steps=1)\n estimator.predict(input_fn)\n\n def testConfig(self):\n module_path = os.path.join(self.get_temp_dir(), \"module\")\n export_module_spec(self.spec, module_path)\n image_column = hub.image_embedding_column(\"image\", module_path)\n config = image_column.get_config()\n cloned_image_column = hub.feature_column._ImageEmbeddingColumn.from_config(\n config)\n self.assertEqual(cloned_image_column.module_spec_path,\n image_column.module_spec_path)\n\n with self.assertRaisesRegexp(NotImplementedError, \"Can only generate\"):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n config = image_column.get_config()\n\n def 
testName(self):\n image_column = hub.image_embedding_column(\n tf.feature_column.numeric_column(\"image\"), self.spec)\n self.assertEqual(\"image_hub_module_embedding\", image_column.name)\n\n\nclass SparseTextEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n def testVariableShape(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, combiner=\"mean\", default_value=None, trainable=False)\n self.assertEqual(text_column._variable_shape, [4])\n\n def testMakeParseExampleSpec(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, combiner=\"mean\", default_value=None, trainable=False)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])\n self.assertEqual(parsing_spec, {\"text\": tf_v1.VarLenFeature(tf.string)})\n\n def testParents(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, \"sum\", \"\", trainable=False)\n self.assertEqual([\"text\"], text_column.parents)\n\n def testInputLayer(self):\n with tf.Graph().as_default():\n text_a = tf.SparseTensor(\n values=[\"hello world\", \"pair-programming\", \"hello world\"],\n indices=[[0, 0], [0, 1], [1, 0]],\n dense_shape=[2, 2])\n text_b = tf.SparseTensor(\n values=[\"hello world\", \"oov token\"],\n indices=[[0, 0], [0, 1]],\n dense_shape=[2, 3])\n\n features = {\n \"text_a\": text_a,\n \"text_b\": text_b,\n }\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text_a\",\n self.spec,\n combiner=\"mean\",\n default_value=\"__UNKNOWN__\",\n trainable=False),\n hub.sparse_text_embedding_column(\n \"text_b\",\n self.spec,\n combiner=\"mean\",\n default_value=\"__UNKNOWN__\",\n trainable=False),\n ]\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllEqual(\n output,\n [[3, 3.5, 4, 4.5, 0.5, 1, 1.5, 2], [1, 2, 3, 4, 0, 0, 0, 
0]])\n # ([1, 2, 3, 4] + [5, 5, 5, 5])/2 extend ([1, 2, 3, 4] + [0, 0, 0, 0])/2\n # [1, 2, 3, 4] extend [0, 0, 0, 0]\n\n def testTrainableEmbeddingColumn(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=None,\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=[\"hello world\", \"pair-programming\"],\n indices=[[0, 0], [1, 0]],\n dense_shape=[2, 2])\n\n target = [[1, 1, 1, 1], [4, 3, 2, 1]]\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n loss = tf_v1.losses.mean_squared_error(input_layer, target)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])\n for _ in range(10):\n sess.run(train_op)\n self.assertAllClose(sess.run(input_layer), target, atol=0.5)\n\n def testEmptySparseTensorBatch(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=\"default\",\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=tf_v1.constant([], dtype=tf_v1.string, shape=[0]),\n indices=tf_v1.constant([], dtype=tf_v1.int64, shape=[0, 2]),\n dense_shape=[3, 0])\n\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n with tf_v1.train.MonitoredSession() as sess:\n embeddings = sess.run(input_layer)\n self.assertAllEqual(embeddings,\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n\n def testEmptySparseTensorRow(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=\"default\",\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=tf_v1.constant([\"hello world\"], dtype=tf_v1.string, shape=[1]),\n 
indices=tf_v1.constant([[0, 0]], dtype=tf_v1.int64, shape=[1, 2]),\n dense_shape=[2, 1])\n\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n with tf_v1.train.MonitoredSession() as sess:\n embeddings = sess.run(input_layer)\n self.assertAllEqual(embeddings, [[1, 2, 3, 4], [0, 0, 0, 0]])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.feature_column.numeric_column", "tensorflow.zeros", "tensorflow.gather", "tensorflow.compat.v2.keras.layers.DenseFeatures", "tensorflow.Graph", "tensorflow.python.feature_column.feature_column_v2.is_feature_column_v2", "tensorflow.SparseTensor", "tensorflow.python.ops.lookup_ops.HashTable", "numpy.array", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.python.ops.lookup_ops.KeyValueTensorInitializer", "tensorflow.test.main" ] ]
Laurans/procgen_adventure
[ "5f88f3f647f7854c8fb2ae516f3490d89845eefa" ]
[ "procgen_adventure/utils/torch_utils.py" ]
[ "import numpy as np\nimport torch\nimport torch.distributed as dist\n\n\ndef tensor(x, device):\n if isinstance(x, torch.Tensor):\n return x.to(device)\n\n x = np.asarray(x, dtype=np.float)\n x = torch.tensor(x, device=device, dtype=torch.float32)\n return x\n\n\ndef input_preprocessing(x, device):\n x = tensor(x, device)\n x = x.float()\n x /= 255.0\n return x\n\n\ndef to_np(t):\n return t.cpu().detach().numpy()\n\n\ndef random_seed(seed=None):\n np.random.seed(seed)\n torch.manual_seed(np.random.randint(int(1e6)))\n\n\ndef restore_model(model, save_path):\n checkpoint = torch.load(save_path)\n model.network.load_state_dict(checkpoint[\"model_state_dict\"])\n model.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n update = checkpoint[\"update\"]\n return update\n\n\ndef sync_initial_weights(model):\n for param in model.parameters():\n dist.broadcast(param.data, src=0)\n\n\ndef sync_gradients(model):\n for param in model.parameters():\n dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\ndef sync_values(tensor_sum_values, tensor_nb_values):\n dist.reduce(tensor_sum_values, dst=0)\n dist.reduce(tensor_nb_values, dst=0)\n return tensor_sum_values / tensor_nb_values\n\n\ndef range_tensor(t, device):\n return torch.arange(t).long().to(device)\n\n\ndef zeros(shape, dtype):\n \"\"\"Attempt to return torch tensor of zeros, or if numpy dtype provided,\n return numpy array or zeros.\"\"\"\n try:\n return torch.zeros(shape, dtype=dtype)\n except TypeError:\n return np.zeros(shape, dtype=dtype)\n" ]
[ [ "torch.load", "torch.distributed.broadcast", "numpy.zeros", "numpy.random.seed", "torch.tensor", "numpy.asarray", "torch.arange", "torch.distributed.all_reduce", "torch.zeros", "torch.distributed.destroy_process_group", "torch.distributed.reduce" ] ]
vgliner/Chlng_20_Sub
[ "169d098e5315510df83ad988c7e2067317cef4cf" ]
[ "ECG_Dataloader_Brazilian_records.py" ]
[ "from torch.utils.data import Dataset\nimport os\nimport scipy.io as sio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport pandas as pd\nimport random\nfrom scipy.io import loadmat\nimport Utils\nfrom scipy import interpolate\nfrom scipy import signal\nimport csv\nfrom scipy.signal import butter, lfilter, freqz\nimport re\nfrom glob import glob\nimport time\nimport pickle\n\n\n\"\"\"\nIt contains annotations about 6 different ECGs abnormalities:\n- 1st degree AV block (1dAVb);\n- right bundle branch block (RBBB);\n- left bundle branch block (LBBB);\n- sinus bradycardia (SB);\n- atrial fibrillation (AF); \n- sinus tachycardia (ST).\n\nNotation of multiclass_to_binary_type: \n[-1] Return multiclass [0] I-AVB, [1] RBBB, [2] LBBB, [3] SB, [4] AF, [5] ST\n\"\"\"\n\nPRINT_FLAG = False\n\n\nclass ECG_Multilead_Dataset_Brazilian_records(Dataset):\n def __init__(self, root_dir=None, transform=None, multiclass=False,\n binary_class_type=1, apply_aurmentation=True, random_augmentation=True,\n augmentation_method=None, record_length=60, to_normalize=True, Uploading_method='HDD',\n old_format= False):\n # record_length [sec]\n # Uploading_method = 'HDD'\\'RAM'\\'cache'\n super().__init__()\n self.data = []\n self.samples = None\n self.root_dir = root_dir\n self.transform = transform\n self.multiclass = multiclass\n self.binary_class_type = binary_class_type\n self.apply_aurmentation = apply_aurmentation\n self.random_augmentation = random_augmentation\n self.augmentation_method = augmentation_method\n self.database_length = 0\n self.data_mutual_sample_rate = 500\n self.record_length = record_length * self.data_mutual_sample_rate\n self.to_normalize = to_normalize\n self.Uploading_method = Uploading_method\n self.brazilian_database_path = None\n self.brazilian_annotations_path = None\n self.sample_rate = 400\n self.maximal_length = self.sample_rate * self.record_length\n\n if not multiclass:\n assert binary_class_type >= 0, 'Class selection is mandatory 
for single class classification'\n\n if self.root_dir is None:\n paths = Utils.read_config_file()\n self.brazilian_database_path = paths[1]\n self.brazilian_annotations_path = paths[2]\n self.brazilian_annotations_dict_path = paths[3]\n\n else:\n self.brazilian_database_path = self.root_dir + dataset_filename\n\n self.f = h5py.File(self.brazilian_database_path, \"r\")\n self.data_ids = np.array(self.f['id_exam'])\n self.data = self.f['signal']\n start = time.process_time()\n self.annotations = pd.read_csv(self.brazilian_annotations_path)\n end = time.process_time()\n print(f'Uploading annotations took {end-start} sec.')\n start = time.process_time()\n\n # Convert Data Frame to Dictionary (set_index method allows any column to be used as index)\n with open(self.brazilian_annotations_dict_path, 'rb') as handle:\n self.annotations_dict = pickle.load(handle)\n #self.annotations_dict = self.annotations.set_index('id_exam').transpose().to_dict(orient='dict')\n end = time.process_time()\n print(f'Uploading annotations dictionary took {end-start} sec.')\n print('finished')\n\n self.loaded_data = {}\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n\n if idx not in self.loaded_data.keys():\n sample = self.data[idx]\n data_id = self.data_ids[idx]\n sample = np.transpose(sample)\n annotation = self.annotations_dict[data_id]\n annotation = list(annotation.values())[3:]\n sample = (sample, annotation)\n else:\n sample = self.loaded_data[idx]\n\n if self.to_normalize:\n sample = self.normalization(sample)\n\n if self.binary_class_type >= 0 and not self.multiclass:\n sample[1] = sample[1][int(self.binary_class_type)]\n\n if self.multiclass:\n sample[1] = np.stack(sample[1])\n\n if self.Uploading_method == 'cache' and idx not in self.loaded_data.keys():\n self.loaded_data[idx] = sample\n\n if self.apply_aurmentation:\n sample = self.augmentation_algorithm(sample)\n\n return sample\n\n def find_annotations(self, id_to_find):\n a= 
list(self.annotations['id_exam']).index(id_to_find)\n return list(self.annotations.iloc[a].values[4:])\n\n @staticmethod\n def plot(sample):\n item_to_plot = sample[0]\n fig, axes = plt.subplots(nrows=6, ncols=2)\n fig.suptitle(np.array2string(sample[1]), fontsize=14)\n titles = ['Lead1', 'Lead2', 'Lead3', 'aVR', 'aVL', 'aVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']\n b = item_to_plot\n for ax, cntr in zip(axes.flatten(), range(12)):\n ax.plot(b[cntr, :], linewidth=1.0)\n ax.set(title=titles[cntr])\n plt.plot()\n plt.show()\n return\n\n @staticmethod\n def plot_one_strip(one_strip):\n item_to_plot = one_strip\n plt.plot(item_to_plot)\n plt.show()\n return\n\n\n def augmentation_algorithm(self, record):\n current_record_length = record[0].shape[1]\n if current_record_length == self.record_length:\n return record\n if current_record_length <= self.record_length: # record is shorter than maximal length or similar\n new_sample = np.zeros((12, self.record_length))\n index_for_pasting = random.sample(range(self.record_length - current_record_length), 1)\n new_sample[:, index_for_pasting[0]:index_for_pasting[0] + current_record_length] = record[0]\n else: # record is longer than maximal length\n index_for_pasting = random.sample(range(current_record_length - self.record_length), 1)\n new_sample = record[0][:, index_for_pasting[0]:index_for_pasting[0] + self.record_length]\n return [new_sample, record[1]]\n\n @staticmethod\n def normalization(record):\n sample = record[0]\n for i, strip in enumerate(sample):\n max_ = np.max(strip)\n min_ = np.min(strip)\n if max_ - min_ == 0:\n sample[i] = strip\n else:\n sample[i] = (strip - min_) / (max_ - min_)\n return [sample, record[1]] \n\n\ndef test_Brazilian_db_dataloader():\n print('Testing Brazilian database')\n ds = ECG_Multilead_Dataset_Brazilian_records()\n start = time.process_time()\n for record_counter in range(len(ds)):\n ds_record = ds[record_counter]\n # ds.plot(ds_record)\n if record_counter %10000 ==0:\n stop = 
time.process_time()\n print(f'Loaded record # {record_counter}, time : {stop-start}')\n print('Finished testing')\n\n\nif __name__ == \"__main__\":\n test_Brazilian_db_dataloader()\n" ]
[ [ "numpy.transpose", "numpy.zeros", "numpy.array2string", "pandas.read_csv", "numpy.stack", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.max", "numpy.min", "numpy.array", "matplotlib.pyplot.plot" ] ]
f0nzie/rTorch
[ "40292ecd8a9ac1af6a03247cbb5f7a3227d60e2f" ]
[ "inst/python/torchtools/data_util.py" ]
[ "import gzip\nimport os\nfrom os import path\nimport numpy as np\n\nimport sys\nif sys.version_info.major < 3:\n import urllib\nelse:\n import urllib.request as request\n\n\nDATASET_DIR = 'datasets/'\n\nMNIST_FILES = [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]\n\n\ndef download_file(url, local_path):\n dir_path = path.dirname(local_path)\n if not path.exists(dir_path):\n print(\"Creating the directory '%s' ...\" % dir_path)\n os.makedirs(dir_path)\n\n print(\"Downloading from '%s' ...\" % url)\n if sys.version_info.major < 3:\n urllib.URLopener().retrieve(url, local_path)\n else:\n request.urlretrieve(url, local_path)\n\n\ndef download_mnist(local_path):\n url_root = \"http://yann.lecun.com/exdb/mnist/\"\n for f_name in MNIST_FILES:\n f_path = os.path.join(local_path, f_name)\n if not path.exists(f_path):\n download_file(url_root + f_name, f_path)\n\n\ndef one_hot(x, n):\n if type(x) == list:\n x = np.array(x)\n x = x.flatten()\n o_h = np.zeros((len(x), n))\n o_h[np.arange(len(x)), x] = 1\n return o_h\n\n\ndef load_mnist(ntrain=60000, ntest=10000, onehot=True):\n data_dir = os.path.join(DATASET_DIR, 'mnist_digits/')\n if not path.exists(data_dir):\n download_mnist(data_dir)\n else:\n # check all files\n checks = [path.exists(os.path.join(data_dir, f)) for f in MNIST_FILES]\n if not np.all(checks):\n download_mnist(data_dir)\n\n with gzip.open(os.path.join(data_dir, 'train-images-idx3-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)\n\n with gzip.open(os.path.join(data_dir, 'train-labels-idx1-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n trY = loaded[8:].reshape((60000))\n\n with gzip.open(os.path.join(data_dir, 't10k-images-idx3-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n teX = loaded[16:].reshape((10000, 28 
* 28)).astype(float)\n\n with gzip.open(os.path.join(data_dir, 't10k-labels-idx1-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n teY = loaded[8:].reshape((10000))\n\n trX /= 255.\n teX /= 255.\n\n trX = trX[:ntrain]\n trY = trY[:ntrain]\n\n teX = teX[:ntest]\n teY = teY[:ntest]\n\n if onehot:\n trY = one_hot(trY, 10)\n teY = one_hot(teY, 10)\n else:\n trY = np.asarray(trY)\n teY = np.asarray(teY)\n\n return trX, teX, trY, teY\n" ]
[ [ "numpy.array", "numpy.all", "numpy.asarray", "numpy.frombuffer" ] ]
sunher/game
[ "84b01b2c69b5cdecbc301fb0e56380ff06bfe353" ]
[ "snakeai/gameplayAttackAndHideRandom/environmentattackandhiderandom.py" ]
[ "import pprint\nimport random\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom .entities import Snake, Field, CellType, SnakeAction, ALL_SNAKE_ACTIONS, SnakeDirection, Point\n\n\nclass EnvironmentAttackAndHideRandom(object):\n \"\"\"\n Represents the RL environment for the Snake game that implements the game logic,\n provides rewards for the agent and keeps track of game statistics.\n \"\"\"\n\n def __init__(self, config, verbose=1):\n \"\"\"\n Create a new Snake RL environment.\n\n Args:\n config (dict): level configuration, typically found in JSON configs.\n verbose (int): verbosity level:\n 0 = do not write any debug information;\n 1 = write a CSV file containing the statistics for every episode;\n 2 = same as 1, but also write a full log file containing the state of each timestep.\n \"\"\"\n self.field = Field(level_map=config['field'])\n self.snake = None\n self.fruit = []\n self.poison = []\n self.poison_num = 0\n self.initial_snake_length = config['initial_snake_length']\n self.rewards = config['rewards']\n self.max_step_limit = config.get('max_step_limit', 1000)\n self.is_game_over = False\n\n self.timestep_index = 0\n self.current_action = None\n self.stats = EpisodeStatistics()\n self.verbose = verbose\n self.debug_file = None\n self.stats_file = None\n self.enemy = None\n\n def seed(self, value):\n \"\"\" Initialize the random state of the environment to make results reproducible. \"\"\"\n random.seed(value)\n np.random.seed(value)\n\n def get_random_empty_cell(self):\n return self.field.get_random_empty_cell()\n\n @property\n def observation_shape(self):\n \"\"\" Get the shape of the state observed at each timestep. \"\"\"\n return self.field.size, self.field.size\n\n @property\n def num_actions(self):\n \"\"\" Get the number of actions the agent can take. \"\"\"\n return len(ALL_SNAKE_ACTIONS)\n\n def new_episode(self):\n \"\"\" Reset the environment and begin a new episode. 
\"\"\"\n self.field.create_level()\n self.generate_rand_wall()\n # print(self.field._cells)\n self.stats.reset()\n self.timestep_index = 0\n\n self.enemy = None\n self.fruit = []\n self.poison = []\n self.poison_num = 0\n self.snake = Snake(self.field.get_random_empty_cell(), length=self.initial_snake_length)\n self.field.place_snake(self.snake)\n self.generate_emeny()\n self.generate_poison()\n self.current_action = None\n self.is_game_over = False\n\n result = TimestepResult(\n observation=self.get_observation(),\n reward=0,\n is_episode_end=self.is_game_over\n )\n\n self.record_timestep_stats(result)\n return result\n\n def getResult(self):\n result = TimestepResult(\n observation=self.get_observation(),\n reward=0,\n is_episode_end=self.is_game_over\n )\n\n self.record_timestep_stats(result)\n return result\n\n def record_timestep_stats(self, result):\n \"\"\" Record environment statistics according to the verbosity level. \"\"\"\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n\n # Write CSV header for the stats file.\n if self.verbose >= 1 and self.stats_file is None:\n self.stats_file = open('snake-env-{timestamp}.csv', 'w')\n stats_csv_header_line = self.stats.to_dataframe()[:0].to_csv(index=None)\n # print(stats_csv_header_line, self.stats_file, '', flush=True)\n\n # Create a blank debug log file.\n # if self.verbose >= 2 and self.debug_file is None:\n # self.debug_file = open('snake-env-{timestamp}.log', 'w')\n\n self.stats.record_timestep(self.current_action, result)\n self.stats.timesteps_survived = self.timestep_index\n\n # if self.verbose >= 2:\n # print(result, self.debug_file)\n\n # # Log episode stats if the appropriate verbosity level is set.\n # if result.is_episode_end:\n # if self.verbose >= 1:\n # stats_csv_line = self.stats.to_dataframe().to_csv(header=False, index=None)\n # print(stats_csv_line, self.stats_file, '', flush=True)\n # if self.verbose >= 2:\n # print(self.stats, self.debug_file)\n\n def get_observation(self):\n \"\"\" Observe the 
state of the environment. \"\"\"\n return np.copy(self.field._cells)\n\n def choose_action(self, action):\n \"\"\" Choose the action that will be taken at the next timestep. \"\"\"\n\n self.current_action = action\n if action == SnakeAction.TURN_LEFT1:\n self.snake.turn_left()\n elif action == SnakeAction.TURN_LEFT2:\n self.snake.turn_left()\n self.snake.turn_left()\n elif action == SnakeAction.TURN_LEFT3:\n self.snake.turn_left()\n self.snake.turn_left()\n self.snake.turn_left()\n elif action == SnakeAction.TURN_RIGHT1:\n self.snake.turn_right()\n elif action == SnakeAction.TURN_RIGHT2:\n self.snake.turn_right()\n self.snake.turn_right()\n elif action == SnakeAction.TURN_RIGHT3:\n self.snake.turn_right()\n self.snake.turn_right()\n self.snake.turn_right()\n\n def create_wall(self, pos):\n # self.point(pos).type = PointType.WALL\n self.field[pos] = CellType.WALL\n\n def create_fix_wall_1(self):\n wall_pos = [Point(3, 4), Point(3, 5), Point(3, 6), Point(3, 7), Point(3, 8), Point(3, 9),\n Point(6, 3), Point(6, 4), Point(6, 5), Point(6, 8), Point(6, 9), Point(6, 10),\n Point(7, 6),\n Point(8, 5), Point(8, 8),\n Point(9, 4), Point(9, 9),\n Point(10, 3), Point(10, 5), Point(10, 6), Point(10, 7), Point(10, 8), Point(10, 10),\n Point(11, 11)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_2(self):\n wall_pos = [Point(2, 3), Point(2, 10),\n Point(3, 3), Point(3, 10),\n Point(4, 4), Point(4, 9),\n Point(5, 5), Point(5, 8),\n Point(6, 6), Point(6, 7),\n Point(7, 3), Point(7, 10),\n Point(8, 3), Point(8, 6), Point(8, 7), Point(8, 10),\n Point(9, 3), Point(9, 6), Point(9, 7), Point(9, 10),\n Point(10, 4), Point(10, 5), Point(10, 8), Point(10, 9)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_3(self):\n wall_pos = [Point(3, 2), Point(3, 3), Point(3, 8), Point(3, 9),\n Point(4, 4), Point(4, 7), Point(4, 10),\n Point(5, 4), Point(5, 7), Point(5, 10),\n Point(6, 3),\n Point(7, 2), Point(7, 7), Point(7, 10),\n Point(8, 2), Point(8, 
7), Point(8, 10),\n Point(9, 2), Point(9, 7), Point(9, 10),\n Point(10, 3), Point(10, 4), Point(10, 8), Point(10, 9)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_4(self):\n wall_pos = [Point(3, 3), Point(3, 7), Point(3, 8), Point(3, 9),\n Point(4, 3), Point(4, 6), Point(4, 10),\n Point(5, 3), Point(5, 6), Point(5, 10),\n Point(6, 8),\n Point(7, 3), Point(7, 6), Point(7, 10),\n Point(8, 3), Point(8, 6), Point(8, 10),\n Point(9, 3), Point(9, 6), Point(9, 10),\n Point(10, 3), Point(10, 7), Point(10, 8), Point(10, 9)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_5(self):\n wall_pos = [Point(1, 2), Point(1, 6), Point(1, 7), Point(1, 11),\n Point(2, 1), Point(2, 4), Point(2, 9), Point(2, 12),\n Point(3, 3), Point(3, 6), Point(3, 7), Point(3, 10),\n Point(4, 2), Point(4, 5), Point(4, 8), Point(4, 11),\n Point(5, 4), Point(5, 9),\n Point(6, 1), Point(6, 3), Point(6, 10), Point(6, 12),\n Point(7, 1), Point(7, 3), Point(7, 10), Point(7, 12),\n Point(8, 4), Point(8, 9),\n Point(9, 2), Point(9, 5), Point(9, 8), Point(9, 11),\n Point(10, 3), Point(10, 6), Point(10, 7), Point(10, 10),\n Point(11, 1), Point(11, 4), Point(11, 9), Point(11, 12),\n Point(12, 2), Point(12, 6), Point(12, 7), Point(12, 11)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_6(self):\n wall_pos = [Point(1, 3), Point(1, 6), Point(1, 9),\n Point(2, 2), Point(2, 5), Point(2, 8), Point(2, 11),\n Point(3, 1), Point(3, 4), Point(3, 7), Point(3, 10), Point(3, 12),\n Point(4, 3), Point(4, 6), Point(4, 9),\n Point(5, 2), Point(5, 8), Point(5, 11),\n Point(6, 1), Point(6, 4), Point(6, 12),\n Point(7, 3), Point(7, 10),\n Point(8, 2), Point(8, 5), Point(8, 8), Point(8, 11),\n Point(9, 1), Point(9, 4), Point(9, 6), Point(9, 9), Point(9, 12),\n Point(10, 3), Point(10, 7), Point(10, 10),\n Point(11, 2), Point(11, 5), Point(11, 8), Point(11, 11),\n Point(12, 3), Point(12, 6), Point(12, 9), Point(12, 12)]\n for pos in wall_pos:\n 
self.create_wall(pos)\n\n def create_fix_wall_7(self):\n wall_pos = [Point(2, 2), Point(2, 11),\n Point(3, 3), Point(3, 4), Point(3, 5), Point(3, 6), Point(3, 7), Point(3, 8), Point(3, 9),\n Point(3, 10),\n Point(5, 3), Point(5, 10),\n Point(6, 3), Point(6, 10),\n Point(7, 3), Point(7, 10),\n Point(8, 3), Point(8, 10),\n Point(10, 3), Point(10, 4), Point(10, 5), Point(10, 6), Point(10, 7), Point(10, 8), Point(10, 9),\n Point(10, 10),\n Point(11, 2), Point(11, 11)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_8(self):\n wall_pos = [Point(1, 3), Point(1, 4), Point(1, 9), Point(1, 10),\n Point(2, 3), Point(2, 4), Point(2, 9), Point(2, 10),\n Point(3, 3), Point(3, 4), Point(3, 9), Point(3, 10),\n Point(6, 1), Point(6, 2), Point(6, 3), Point(6, 4), Point(6, 9), Point(6, 10), Point(6, 11),\n Point(6, 12),\n Point(7, 1), Point(7, 2), Point(7, 3), Point(7, 4), Point(7, 9), Point(7, 10), Point(7, 11),\n Point(7, 12),\n Point(10, 3), Point(10, 4), Point(10, 9), Point(10, 10),\n Point(11, 3), Point(11, 4), Point(11, 9), Point(11, 10),\n Point(12, 3), Point(12, 4), Point(12, 9), Point(12, 10)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_9(self):\n wall_pos = [Point(3, 5), Point(3, 6),\n Point(4, 4), Point(4, 10),\n Point(5, 4), Point(5, 5), Point(5, 7), Point(5, 8), Point(5, 9),\n Point(6, 4), Point(6, 9),\n Point(7, 3),\n Point(8, 5), Point(8, 6), Point(8, 7), Point(8, 8),\n Point(9, 2), Point(9, 6),\n Point(10, 3), Point(10, 4), Point(10, 6), Point(10, 10),\n Point(11, 2), Point(11, 9), Point(11, 11),\n Point(12, 10)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def generate_rand_wall(self):\n fixnum = np.random.uniform()\n if fixnum < 0.5:\n randomnum = np.random.randint(1, 9)\n funlist = {1: self.create_fix_wall_1, 2: self.create_fix_wall_2, 3: self.create_fix_wall_3,\n 4: self.create_fix_wall_4, 5: self.create_fix_wall_5, 6: self.create_fix_wall_6,\n 7: self.create_fix_wall_7, 8: self.create_fix_wall_8, 9: 
self.create_fix_wall_9}\n funlist[randomnum]()\n return\n self.generate_wall()\n\n # empty_pos = []\n # for i in range(1, self._num_rows - 1):\n # for j in range(1, self._num_cols - 1):\n # t = self._content[i][j].type\n # if t == PointType.EMPTY:\n # empty_pos.append(Pos(i, j))\n\n # empty_pos = self.field.get_empty_cell()\n # wall num\n # wallNum = np.random.randint(10, 50)\n # wall4rate = np.random.uniform()\n # h_pos = None\n # if wall4rate < 0.5:\n # if empty_pos:\n # h_pos = random.choice(empty_pos)\n # w_pos1 = h_pos.adj(Direc.LEFT)\n # w_pos2 = h_pos.adj(Direc.UP)\n # w_pos3 = h_pos.adj(Direc.RIGHT)\n # w_pos4 = h_pos.adj(Direc.DOWN)\n # for pos in [w_pos1, w_pos2, w_pos3, w_pos4]:\n # if pos in empty_pos:\n # self.create_wall(pos)\n # empty_pos.remove(pos)\n # wallNum -= 1\n\n # while wallNum > 0:\n # w_pos = random.choice(empty_pos)\n # if h_pos != w_pos:\n # self.create_wall(w_pos)\n # empty_pos.remove(w_pos)\n # wallNum -= 1\n\n def generate_wall(self):\n # emptyNum = len(self.field._empty_cells)\n randnum = np.random.randint(10, 60)\n i=0\n while(i<randnum):\n pos = random.choice(self.field.get_empty_cell())\n i+=1\n self.field[pos] = CellType.WALL\n\n def generate_emeny(self, position=None):\n \"\"\" Generate a new fruit at a random unoccupied cell. 
\"\"\"\n if position is None:\n position = self.field.get_random_empty_cell()\n self.enemy = position\n self.field[position] = CellType.SNAKE_BODY\n if np.random.random() > 0.2:\n if (self.field[position + SnakeDirection.NORTH] == CellType.EMPTY):\n self.field[position + SnakeDirection.NORTH] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.NORTH)\n if (self.field[position + SnakeDirection.SOUTH] == CellType.EMPTY):\n self.field[position + SnakeDirection.SOUTH] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.SOUTH)\n if (self.field[position + SnakeDirection.WEST] == CellType.EMPTY):\n self.field[position + SnakeDirection.WEST] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.WEST)\n if (self.field[position + SnakeDirection.EAST] == CellType.EMPTY):\n self.field[position + SnakeDirection.EAST] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.EAST)\n if np.random.random() < 0.1:\n position = self.field.get_random_empty_cell()\n self.field[position] = CellType.FRUIT\n self.fruit.append(position)\n if np.random.random() < 0.1:\n position = self.field.get_random_empty_cell()\n self.field[position] = CellType.FRUIT\n self.fruit.append(position)\n\n def generate_snake(self, snake=None):\n \"\"\" Generate a new fruit at a random unoccupied cell. \"\"\"\n self.snake = snake\n self.field.place_snake(self.snake)\n\n def generate_poison(self):\n \"\"\" Generate a new fruit at a random unoccupied cell. \"\"\"\n if np.random.random() < 0:\n self.poison_num = random.Random().choice([1, 2, 3])\n for position in self.field.get_empty_cell():\n if (0 < position.x <= self.poison_num or 0 < position.y <= self.poison_num or (\n position.x + self.poison_num) >= (self.field.size - 1) or (position.y + self.poison_num) >= (\n self.field.size - 1)):\n self.field[position] = CellType.POISON\n self.poison.append(position)\n\n def be_poison(self, position):\n \"\"\" Generate a new fruit at a random unoccupied cell. 
\"\"\"\n # if np.random.random() < 1:\n if (0 < position.x <= self.poison_num or 0 < position.y <= self.poison_num or (\n position.x + self.poison_num) >= (self.field.size - 1) or (position.y + self.poison_num) >= (\n self.field.size - 1)):\n return True\n return False\n\n def timestep(self):\n \"\"\" Execute the timestep and return the new observable state. \"\"\"\n\n self.timestep_index += 1\n reward = 0\n isdie = False\n old_head = self.snake.head\n old_tail = self.snake.tail\n\n # Are we about to eat the fruit?\n if self.fruit.__contains__(self.snake.peek_next_move()):\n self.fruit.remove(self.snake.peek_next_move())\n # self.generate_fruit()\n # old_tail = None\n reward += self.rewards['ate_fruit']\n self.stats.fruits_eaten += 1\n elif self.be_poison(self.snake.peek_next_move()):\n self.stats.poisons_eaten += 1\n # If not, just move forward.\n\n self.snake.move()\n\n self.field.update_snake_footprint(old_head, old_tail, self.snake.head)\n\n # Hit a wall or own body?\n if not self.is_alive():\n # reward -=self.fruit.__len__()\n if self.has_hit_wall() or self.has_hit_own_body():\n self.stats.termination_reason = 'hit_wall'\n reward -= 0.7\n isdie = True\n self.field[self.snake.head] = CellType.SNAKE_HEAD\n self.is_game_over = True\n # reward *= 0.7\n # print(self.fruit.__len__())\n # if(self.get_wall_num(old_head) >= 2) and self.fruit.__len__()<=1:\n # reward = self.get_wall_num(old_head) - self.fruit.__len__()\n # else:\n # reward = -1\n reward += (self.get_wall_num(old_head) - 1.5)\n if self.snake.length == 2 or self.snake.length == 1:\n reward -= 2\n\n if self.stats.poisons_eaten != 0:\n reward -= 2\n\n if (self.be_poison(old_head)):\n reward -= 1\n\n # reward += 0.99\n # Exceeded the limit of moves?\n if self.timestep_index >= self.max_step_limit:\n self.is_game_over = True\n self.stats.termination_reason = 'timestep_limit_exceeded'\n\n result = TimestepResult(\n observation=self.get_observation(),\n reward=reward,\n is_episode_end=self.is_game_over\n )\n\n 
self.record_timestep_stats(result)\n return result\n\n def get_wall_num(self, position=None):\n num = 0\n if self.field[position + SnakeDirection.NORTH] == CellType.WALL:\n num += 1\n if self.field[position + SnakeDirection.SOUTH] == CellType.WALL:\n num += 1\n if self.field[position + SnakeDirection.WEST] == CellType.WALL:\n num += 1\n if self.field[position + SnakeDirection.EAST] == CellType.WALL:\n num += 1\n if self.field[\n position + SnakeDirection.NORTH] == CellType.POISON:\n num += 0.5\n if self.field[\n position + SnakeDirection.SOUTH] == CellType.POISON:\n num += 0.5\n if self.field[\n position + SnakeDirection.WEST] == CellType.POISON:\n num += 0.5\n if self.field[\n position + SnakeDirection.EAST] == CellType.POISON:\n num += 0.5\n return num\n\n def generate_fruit(self, position=None):\n \"\"\" Generate a new fruit at a random unoccupied cell. \"\"\"\n if position is None:\n position = self.field.get_random_empty_cell()\n self.field[position] = CellType.FRUIT\n self.fruit.append(position)\n\n def has_hit_wall(self):\n \"\"\" True if the snake has hit a wall, False otherwise. \"\"\"\n return self.field[self.snake.head] == CellType.WALL\n\n def has_hit_own_body(self):\n \"\"\" True if the snake has hit its own body, False otherwise. \"\"\"\n return self.field[self.snake.head] == CellType.SNAKE_BODY\n\n def is_alive(self):\n \"\"\" True if the snake is still alive, False otherwise. \"\"\"\n return not self.has_hit_wall() and not self.has_hit_own_body()\n\n\nclass TimestepResult(object):\n \"\"\" Represents the information provided to the agent after each timestep. 
\"\"\"\n\n def __init__(self, observation, reward, is_episode_end):\n self.observation = observation\n self.reward = reward\n self.is_episode_end = is_episode_end\n\n def __str__(self):\n field_map = '\\n'.join([\n ''.join(str(cell) for cell in row)\n for row in self.observation\n ])\n return '{field_map}\\nR = {self.reward} {self.is_episode_end}\\n'\n\n\nclass EpisodeStatistics(object):\n \"\"\" Represents the summary of the agent's performance during the episode. \"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n \"\"\" Forget all previous statistics and prepare for a new episode. \"\"\"\n self.timesteps_survived = 0\n self.sum_episode_rewards = 0\n self.fruits_eaten = 0\n self.poisons_eaten = 0\n self.termination_reason = None\n self.action_counter = {\n action: 0\n for action in ALL_SNAKE_ACTIONS\n }\n\n def record_timestep(self, action, result):\n \"\"\" Update the stats based on the current timestep results. \"\"\"\n self.sum_episode_rewards += result.reward\n if action is not None:\n self.action_counter[action] += 1\n\n def flatten(self):\n \"\"\" Format all episode statistics as a flat object. \"\"\"\n flat_stats = {\n 'timesteps_survived': self.timesteps_survived,\n 'sum_episode_rewards': self.sum_episode_rewards,\n 'mean_reward': self.sum_episode_rewards / self.timesteps_survived if self.timesteps_survived else None,\n 'fruits_eaten': self.fruits_eaten,\n 'termination_reason': self.termination_reason,\n }\n flat_stats.update({\n 'action_counter_{action}': self.action_counter.get(action, 0)\n for action in ALL_SNAKE_ACTIONS\n })\n return flat_stats\n\n def to_dataframe(self):\n \"\"\" Convert the episode statistics to a Pandas data frame. \"\"\"\n return pd.DataFrame([self.flatten()])\n\n def __str__(self):\n return pprint.pformat(self.flatten())\n" ]
[ [ "numpy.random.uniform", "numpy.random.seed", "numpy.copy", "numpy.random.random", "numpy.random.randint" ] ]
KennyKangMPC/chebpy
[ "5ad603b15f90a0f36093f1705e3e08d090330cef" ]
[ "tests/test_bndfun.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Unit-tests for pyfun/core/bndfun.py\"\"\"\n\nfrom __future__ import division\n\nimport itertools\nimport operator\nimport unittest\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom chebpy.core.bndfun import Bndfun\nfrom chebpy.core.chebtech import Chebtech2\nfrom chebpy.core.settings import DefaultPrefs\nfrom chebpy.core.utilities import Interval\nfrom chebpy.core.algorithms import standard_chop\n\nfrom tests.utilities import testfunctions, infnorm\n\n# aliases\npi = np.pi\nsin = np.sin\ncos = np.cos\nexp = np.exp\neps = DefaultPrefs.eps\n\n# NOTE: since (Fun/ClassicFun/)Bndfun is not a user-facing class (although it\n# is not abstract) we will test the interface in the way Chebfun will interact\n# with it, which means working explcitly with Interval objects. Furthermore,\n# since we have already tested the adaptive constructor in the Chebtech-level\n# tests, we just use the adaptive constructor in these tests.\n\nclass ClassUsage(unittest.TestCase):\n \"\"\"Unit-tests for miscelaneous Bndfun class usage\"\"\"\n\n def setUp(self):\n f = lambda x: sin(30*x)\n subinterval = Interval(-2,3)\n self.f = f\n self.ff = Bndfun.initfun_adaptive(f, subinterval)\n self.xx = subinterval(np.linspace(-1,1,100))\n self.emptyfun = Bndfun(Chebtech2.initempty(), subinterval)\n self.constfun = Bndfun(Chebtech2.initconst(1.), subinterval)\n\n # tests for emptiness of Bndfun objects\n def test_isempty_True(self):\n self.assertTrue(self.emptyfun.isempty)\n self.assertFalse(not self.emptyfun.isempty)\n\n def test_isempty_False(self):\n self.assertFalse(self.constfun.isempty)\n self.assertTrue(not self.constfun.isempty)\n\n # tests for constantness of Bndfun objects\n def test_isconst_True(self):\n self.assertTrue(self.constfun.isconst)\n self.assertFalse(not self.constfun.isconst)\n\n def test_isconst_False(self):\n self.assertFalse(self.emptyfun.isconst)\n self.assertTrue(not self.emptyfun.isconst)\n\n # check the size() method is working 
properly\n def test_size(self):\n cfs = np.random.rand(10)\n subinterval = Interval()\n b0 = Bndfun(Chebtech2(np.array([])), subinterval)\n b1 = Bndfun(Chebtech2(np.array([1.])), subinterval)\n b2 = Bndfun(Chebtech2(cfs), subinterval)\n self.assertEquals(b0.size, 0)\n self.assertEquals(b1.size, 1)\n self.assertEquals(b2.size, cfs.size)\n\n def test_support(self):\n a, b = self.ff.support\n self.assertEqual(a, -2)\n self.assertEqual(b, 3)\n\n def test_endvalues(self):\n a, b = self.ff.support\n fa, fb = self.ff.endvalues\n self.assertLessEqual(abs(fa-self.f(a)), 2e1*eps)\n self.assertLessEqual(abs(fb-self.f(b)), 2e1*eps)\n\n # test the different permutations of self(xx, ..)\n def test_call(self):\n self.ff(self.xx)\n\n def test_call_bary(self):\n self.ff(self.xx, \"bary\")\n self.ff(self.xx, how=\"bary\")\n\n def test_call_clenshaw(self):\n self.ff(self.xx, \"clenshaw\")\n self.ff(self.xx, how=\"clenshaw\")\n\n def test_call_bary_vs_clenshaw(self):\n b = self.ff(self.xx, \"clenshaw\")\n c = self.ff(self.xx, \"bary\")\n self.assertLessEqual(infnorm(b-c), 2e2*eps)\n\n def test_call_raises(self):\n self.assertRaises(ValueError, self.ff, self.xx, \"notamethod\")\n self.assertRaises(ValueError, self.ff, self.xx, how=\"notamethod\")\n\n def test_vscale_empty(self):\n self.assertEquals(self.emptyfun.vscale, 0.)\n\n def test_copy(self):\n ff = self.ff\n gg = self.ff.copy()\n self.assertEquals(ff, ff)\n self.assertEquals(gg, gg)\n self.assertNotEquals(ff, gg)\n self.assertEquals(infnorm(ff.coeffs-gg.coeffs), 0)\n\n # check that the restricted fun matches self on the subinterval\n def test_restrict(self):\n i1 = Interval(-1,1)\n gg = self.ff.restrict(i1)\n yy = np.linspace(-1,1,1000)\n self.assertLessEqual(infnorm(self.ff(yy)-gg(yy)), 1e2*eps)\n\n # check that the restricted fun matches self on the subinterval\n def test_simplify(self):\n interval = Interval(-2,1)\n ff = Bndfun.initfun_fixedlen(self.f, interval, 1000)\n gg = ff.simplify()\n self.assertEqual(gg.size, 
standard_chop(ff.onefun.coeffs))\n self.assertEqual(infnorm(ff.coeffs[:gg.size]-gg.coeffs), 0)\n self.assertEqual(ff.interval, gg.interval)\n# --------------------------------------\n# vscale estimates\n# --------------------------------------\nvscales = [\n # (function, number of points, vscale)\n (lambda x: sin(4*pi*x), [-2, 2], 1),\n (lambda x: cos(x), [-10, 1], 1),\n (lambda x: cos(4*pi*x), [-100, 100], 1),\n (lambda x: exp(cos(4*pi*x)), [-1,1], exp(1)),\n (lambda x: cos(3244*x), [-2,0], 1),\n (lambda x: exp(x), [-1,2], exp(2)),\n (lambda x: 1e10*exp(x), [-1,1], 1e10*exp(1)),\n (lambda x: 0*x+1., [-1e5,1e4], 1),\n]\n\ndef definiteIntegralTester(fun, interval, vscale):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n def tester(self):\n absdiff = abs(ff.vscale-vscale)\n self.assertLessEqual(absdiff, .1*vscale)\n return tester\n\nfor k, args in enumerate(vscales):\n _testfun_ = definiteIntegralTester(*args)\n _testfun_.__name__ = \"test_vscale_{:02}\".format(k)\n setattr(ClassUsage, _testfun_.__name__, _testfun_)\n\n\nclass Plotting(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun plotting methods\"\"\"\n\n def setUp(self):\n f = lambda x: sin(1*x) + 5e-1*cos(10*x) + 5e-3*sin(100*x)\n subinterval = Interval(-6, 10)\n self.f0 = Bndfun.initfun_fixedlen(f, subinterval, 1000)\n self.f1 = Bndfun.initfun_adaptive(f, subinterval)\n\n def test_plot(self):\n fig, ax = plt.subplots()\n self.f0.plot(ax=ax, color=\"g\", marker=\"o\", markersize=2, linestyle=\"\")\n\n def test_plotcoeffs(self):\n fig, ax = plt.subplots()\n self.f0.plotcoeffs(ax=ax)\n self.f1.plotcoeffs(ax=ax, color=\"r\")\n\n\n\nclass Calculus(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun calculus operations\"\"\"\n\n def setUp(self):\n self.emptyfun = Bndfun(Chebtech2.initempty(), Interval())\n self.yy = np.linspace(-1,1,2000)\n# self.constfun = Bndfun(Chebtech2.initconst(1.), subinterval)\n\n # tests for the correct results in the empty cases\n def 
test_sum_empty(self):\n self.assertEqual(self.emptyfun.sum(), 0)\n\n def test_cumsum_empty(self):\n self.assertTrue(self.emptyfun.cumsum().isempty)\n\n def test_diff_empty(self):\n self.assertTrue(self.emptyfun.diff().isempty)\n\n# --------------------------------------\n# definite integrals\n# --------------------------------------\ndef_integrals = [\n # (function, interval, integral, tolerance)\n (lambda x: sin(x), [-2,2], .0, 2*eps),\n (lambda x: sin(4*pi*x), [-.1, .7], 0.088970317927147, 1e1*eps),\n (lambda x: cos(x), [-100,203], 0.426944059057085, 4e2*eps),\n (lambda x: cos(4*pi*x), [-1e-1,-1e-3], 0.074682699182803, 2*eps),\n (lambda x: exp(cos(4*pi*x)), [-3,1], 5.064263511008033, 4*eps),\n (lambda x: cos(3244*x), [0,0.4], -3.758628487169980e-05, 5e2*eps),\n (lambda x: exp(x), [-2,-1], exp(-1)-exp(-2), 2*eps),\n (lambda x: 1e10*exp(x), [-1,2], 1e10*(exp(2)-exp(-1)), 2e10*eps),\n (lambda x: 0*x+1., [-100,300], 400, eps),\n]\n\ndef definiteIntegralTester(fun, interval, integral, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n def tester(self):\n absdiff = abs(ff.sum()-integral)\n self.assertLessEqual(absdiff, tol)\n return tester\n\nfor k, (fun, n, integral, tol) in enumerate(def_integrals):\n _testfun_ = definiteIntegralTester(fun, n, integral, tol)\n _testfun_.__name__ = \"test_sum_{:02}\".format(k)\n setattr(Calculus, _testfun_.__name__, _testfun_)\n\n# --------------------------------------\n# indefinite integrals\n# --------------------------------------\nindef_integrals = [\n # (function, indefinite integral, interval, tolerance)\n (lambda x: 0*x+1., lambda x: x, [-2,3], eps),\n (lambda x: x, lambda x: 1/2*x**2, [-5,0], 4*eps),\n (lambda x: x**2, lambda x: 1/3*x**3, [1,10], 2e2*eps),\n (lambda x: x**3, lambda x: 1/4*x**4, [-1e-2,4e-1], 2*eps),\n (lambda x: x**4, lambda x: 1/5*x**5, [-3,-2], 3e2*eps),\n (lambda x: x**5, lambda x: 1/6*x**6, [-1e-10,1], 4*eps),\n (lambda x: sin(x), lambda x: -cos(x), [-10,22], 
3e1*eps),\n (lambda x: cos(3*x), lambda x: 1./3*sin(3*x), [-3,4], 2*eps),\n (lambda x: exp(x), lambda x: exp(x), [-60,1], 1e1*eps),\n (lambda x: 1e10*exp(x), lambda x: 1e10*exp(x), [-1,1], 1e10*(3*eps)),\n]\n\ndef indefiniteIntegralTester(fun, ifn, interval, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n gg = Bndfun.initfun_fixedlen(ifn, subinterval, ff.size+1)\n coeffs = gg.coeffs\n coeffs[0] = coeffs[0] - ifn(np.array([interval[0]]))\n def tester(self):\n absdiff = infnorm(ff.cumsum().coeffs - coeffs)\n self.assertLessEqual(absdiff, tol)\n return tester\n\nfor k, (fun, dfn, n, tol) in enumerate(indef_integrals):\n _testfun_ = indefiniteIntegralTester(fun, dfn, n, tol)\n _testfun_.__name__ = \"test_cumsum_{:02}\".format(k)\n setattr(Calculus, _testfun_.__name__, _testfun_)\n\n# --------------------------------------\n# derivatives\n# --------------------------------------\nderivatives = [\n# (function, derivative, number of points, tolerance)\n (lambda x: 0*x+1., lambda x: 0*x+0, [-2,3], eps),\n (lambda x: x, lambda x: 0*x+1, [-5,0], 2e1*eps),\n (lambda x: x**2, lambda x: 2*x, [1,10], 2e2*eps),\n (lambda x: x**3, lambda x: 3*x**2, [-1e-2,4e-1], 3*eps),\n (lambda x: x**4, lambda x: 4*x**3, [-3,-2], 1e3*eps),\n (lambda x: x**5, lambda x: 5*x**4, [-1e-10,1], 4e1*eps),\n (lambda x: sin(x), lambda x: cos(x), [-10,22], 5e2*eps),\n (lambda x: cos(3*x), lambda x: -3*sin(3*x), [-3,4], 5e2*eps),\n (lambda x: exp(x), lambda x: exp(x), [-60,1], 2e2*eps),\n (lambda x: 1e10*exp(x), lambda x: 1e10*exp(x), [-1,1], 1e10*2e2*eps),\n]\n\ndef derivativeTester(fun, ifn, interval, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n gg = Bndfun.initfun_fixedlen(ifn, subinterval, max(ff.size-1,1))\n def tester(self):\n absdiff = infnorm(ff.diff().coeffs - gg.coeffs)\n self.assertLessEqual(absdiff, tol)\n return tester\n\nfor k, (fun, der, n, tol) in enumerate(derivatives):\n _testfun_ = 
derivativeTester(fun, der, n, tol)\n _testfun_.__name__ = \"test_diff_{:02}\".format(k)\n setattr(Calculus, _testfun_.__name__, _testfun_)\n\n\nclass Construction(unittest.TestCase):\n \"\"\"Unit-tests for construction of Bndfun objects\"\"\"\n\n def test_onefun_construction(self):\n coeffs = np.random.rand(10)\n subinterval = Interval()\n onefun = Chebtech2(coeffs)\n f = Bndfun(onefun, subinterval)\n self.assertIsInstance(f, Bndfun)\n self.assertLess(infnorm(f.coeffs-coeffs), eps)\n\n def test_const_construction(self):\n subinterval = Interval()\n ff = Bndfun.initconst(1., subinterval)\n self.assertEquals(ff.size, 1)\n self.assertTrue(ff.isconst)\n self.assertFalse(ff.isempty)\n self.assertRaises(ValueError, Bndfun.initconst, [1.], subinterval)\n\n def test_empty_construction(self):\n ff = Bndfun.initempty()\n self.assertEquals(ff.size, 0)\n self.assertFalse(ff.isconst)\n self.assertTrue(ff.isempty)\n self.assertRaises(TypeError, Bndfun.initempty, [1.])\n\n def test_identity_construction(self):\n for (a,b) in [(-1,1), (-10,-2), (-2.3, 1.24), (20,2000)]:\n itvl = Interval(a,b)\n ff = Bndfun.initidentity(itvl)\n self.assertEquals(ff.size, 2)\n xx = np.linspace(a,b,1001)\n tol = eps * abs(itvl).max()\n self.assertLessEqual(infnorm(ff(xx)-xx), tol)\n\ndef adaptiveTester(fun, subinterval, funlen):\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n def tester(self):\n self.assertEquals(ff.size, funlen)\n return tester\n\ndef fixedlenTester(fun, subinterval, n):\n ff = Bndfun.initfun_fixedlen(fun, subinterval, n)\n def tester(self):\n self.assertEquals(ff.size, n)\n return tester\n\nfuns = []\nfun_details = [\n # (function, name for the test printouts,\n # Matlab chebfun adaptive degree on [-2,3])\n (lambda x: x**3 + x**2 + x + 1, \"poly3(x)\", [-2,3], 4),\n (lambda x: exp(x), \"exp(x)\", [-2,3], 20),\n (lambda x: sin(x), \"sin(x)\", [-2,3], 20),\n (lambda x: cos(20*x), \"cos(20x)\", [-2,3], 90),\n (lambda x: 0.*x+1., \"constfun\", [-2,3], 1),\n (lambda x: 0.*x, 
\"zerofun\", [-2,3], 1),\n]\n\nfor k, (fun, name, interval, funlen) in enumerate(fun_details):\n\n fun.__name__ = name\n subinterval = Interval(*interval)\n\n # add the adaptive tests\n _testfun_ = adaptiveTester(fun, subinterval, funlen)\n _testfun_.__name__ = \"test_adaptive_{}\".format(fun.__name__)\n setattr(Construction, _testfun_.__name__, _testfun_)\n\n # add the fixedlen tests\n for n in np.array([100]):\n _testfun_ = fixedlenTester(fun, subinterval, n)\n _testfun_.__name__ = \\\n \"test_fixedlen_{}_{:003}pts\".format(fun.__name__, n)\n setattr(Construction, _testfun_.__name__, _testfun_)\n\n\nclass Algebra(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun algebraic operations\"\"\"\n def setUp(self):\n self.yy = np.linspace(-1,1,1000)\n self.emptyfun = Bndfun.initempty()\n\n # check (empty Bndfun) + (Bndfun) = (empty Bndfun)\n # and (Bndfun) + (empty Bndfun) = (empty Bndfun)\n def test__add__radd__empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n chebtech = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue((self.emptyfun+chebtech).isempty)\n self.assertTrue((chebtech+self.emptyfun).isempty)\n\n # check the output of (constant + Bndfun)\n # and (Bndfun + constant)\n def test__add__radd__constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, _) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n f = lambda x: const + fun(x)\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n f1 = const + bndfun\n f2 = bndfun + const\n tol = 4e1 * eps * abs(const)\n self.assertLessEqual(infnorm(f(xx)-f1(xx)), tol)\n self.assertLessEqual(infnorm(f(xx)-f2(xx)), tol)\n\n # check (empty Bndfun) - (Bndfun) = (empty Bndfun)\n # and (Bndfun) - (empty Bndfun) = (empty Bndfun)\n def test__sub__rsub__empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n chebtech = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue((self.emptyfun-chebtech).isempty)\n 
self.assertTrue((chebtech-self.emptyfun).isempty)\n\n # check the output of constant - Bndfun\n # and Bndfun - constant\n def test__sub__rsub__constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, _) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n f = lambda x: const - fun(x)\n g = lambda x: fun(x) - const\n ff = const - bndfun\n gg = bndfun - const\n tol = 5e1 * eps * abs(const)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n self.assertLessEqual(infnorm(g(xx)-gg(xx)), tol)\n\n # check (empty Bndfun) * (Bndfun) = (empty Bndfun)\n # and (Bndfun) * (empty Bndfun) = (empty Bndfun)\n def test__mul__rmul__empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n chebtech = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue((self.emptyfun*chebtech).isempty)\n self.assertTrue((chebtech*self.emptyfun).isempty)\n\n # check the output of constant * Bndfun\n # and Bndfun * constant\n def test__mul__rmul__constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, _) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n f = lambda x: const * fun(x)\n g = lambda x: fun(x) * const\n ff = const * bndfun\n gg = bndfun * const\n tol = 4e1 * eps * abs(const)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n self.assertLessEqual(infnorm(g(xx)-gg(xx)), tol)\n\n # check (empty Bndfun) / (Bndfun) = (empty Bndfun)\n # and (Bndfun) / (empty Bndfun) = (empty Bndfun)\n def test_truediv_empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue(operator.truediv(self.emptyfun, bndfun).isempty)\n self.assertTrue(operator.truediv(self.emptyfun, bndfun).isempty)\n # __truediv__\n self.assertTrue((self.emptyfun/bndfun).isempty)\n self.assertTrue((bndfun/self.emptyfun).isempty)\n\n # 
check the output of constant / Bndfun\n # and Bndfun / constant\n def test_truediv_constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, hasRoots) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n hscl = abs(subinterval).max()\n tol = hscl * eps * abs(const)\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n g = lambda x: fun(x) / const\n gg = bndfun / const\n self.assertLessEqual(infnorm(g(xx)-gg(xx)), 3*gg.size*tol)\n # don't do the following test for functions with roots\n if not hasRoots:\n f = lambda x: const / fun(x)\n ff = const / bndfun\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), 2*ff.size*tol)\n\n # check +(empty Bndfun) = (empty Bndfun)\n def test__pos__empty(self):\n self.assertTrue((+self.emptyfun).isempty)\n\n # check -(empty Bndfun) = (empty Bndfun)\n def test__neg__empty(self):\n self.assertTrue((-self.emptyfun).isempty)\n\n # check (empty Bndfun) ** c = (empty Bndfun)\n def test_pow_empty(self):\n for c in range(10):\n self.assertTrue((self.emptyfun**c).isempty)\n\n # check c ** (empty Bndfun) = (empty Bndfun)\n def test_rpow_empty(self):\n for c in range(10):\n self.assertTrue((c**self.emptyfun).isempty)\n\n # check the output of Bndfun ** constant\n def test_pow_const(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for func in (np.sin, np.exp, np.cos):\n for c in (1, 2):\n f = lambda x: func(x) ** c\n ff = Bndfun.initfun_adaptive(func, subinterval) ** c\n tol = 2e1 * eps * abs(c)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n\n # check the output of constant ** Bndfun\n def test_rpow_const(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for func in (np.sin, np.exp, np.cos):\n for c in (1, 2):\n f = lambda x: c ** func(x)\n ff = c ** Bndfun.initfun_adaptive(func, subinterval)\n tol = 1e1 * eps * abs(c)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n\nbinops = (operator.add, operator.mul, operator.sub, operator.truediv)\n\n# add tests for the 
binary operators\ndef binaryOpTester(f, g, subinterval, binop):\n ff = Bndfun.initfun_adaptive(f, subinterval)\n gg = Bndfun.initfun_adaptive(g, subinterval)\n FG = lambda x: binop(f(x),g(x))\n fg = binop(ff, gg)\n def tester(self):\n vscl = max([ff.vscale, gg.vscale])\n lscl = max([ff.size, gg.size])\n xx = subinterval(self.yy)\n self.assertLessEqual(infnorm(fg(xx)-FG(xx)), 6*vscl*lscl*eps)\n return tester\n\n# Note: defining __radd__(a,b) = __add__(b,a) and feeding this into the\n# test will not in fact test the __radd__ functionality of the class.\n# These tests will need to be added manually.\n\nsubintervals = (\n Interval(-.5,.9),\n Interval(-1.2, 1.3),\n Interval(-2.2, -1.9),\n Interval(0.4, 1.3),\n)\n\nfor binop in binops:\n # add the generic binary operator tests\n for (f, _, _), (g, _, denomRoots) in \\\n itertools.combinations(testfunctions, 2):\n for subinterval in subintervals:\n if binop is operator.truediv and denomRoots:\n # skip truediv test if denominator has roots on the real line\n pass\n else:\n _testfun_ = binaryOpTester(f, g, subinterval, binop)\n a, b = subinterval\n _testfun_.__name__ = \\\n \"test_{}_{}_{}_[{:.1f},{:.1f}]\".format(\n binop.__name__, f.__name__, g.__name__, a, b)\n setattr(Algebra, _testfun_.__name__, _testfun_)\n\npowtestfuns = (\n [(np.exp, 'exp'), (np.sin, 'sin')],\n [(np.exp, 'exp'), (lambda x: 2-x, 'linear')],\n [(lambda x: 2-x, 'linear'), (np.exp, 'exp')],\n)\n# add operator.power tests\nfor (f, namef), (g, nameg) in powtestfuns:\n for subinterval in subintervals:\n _testfun_ = binaryOpTester(f, g, subinterval, operator.pow)\n a, b = subinterval\n _testfun_.__name__ = \\\n \"test_{}_{}_{}_[{:.1f},{:.1f}]\".format(\n 'pow', namef, nameg, a, b)\n setattr(Algebra, _testfun_.__name__, _testfun_)\n\nunaryops = (operator.pos, operator.neg)\n\n# add tests for the unary operators\ndef unaryOpTester(unaryop, f, subinterval):\n ff = Bndfun.initfun_adaptive(f, subinterval)\n gg = lambda x: unaryop(f(x))\n GG = unaryop(ff)\n def 
tester(self):\n xx = subinterval(self.yy)\n self.assertLessEqual(infnorm(gg(xx)-GG(xx)), 4e1*eps)\n return tester\n\nfor unaryop in unaryops:\n for (f, _, _) in testfunctions:\n subinterval = Interval(-.5,.9)\n _testfun_ = unaryOpTester(unaryop, f, subinterval)\n _testfun_.__name__ = \\\n \"test_{}_{}\".format(unaryop.__name__, f.__name__)\n setattr(Algebra, _testfun_.__name__, _testfun_)\n\n\nclass Ufuncs(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun numpy ufunc overloads\"\"\"\n def setUp(self):\n self.yy = np.linspace(-1,1,1000)\n self.emptyfun = Bndfun.initempty()\n\nufuncs = (np.absolute, np.arccos, np.arccosh, np.arcsin, np.arcsinh, np.arctan,\n np.arctanh, np.cos, np.cosh, np.exp, np.exp2, np.expm1, np.log,\n np.log2, np.log10, np.log1p, np.sinh, np.sin, np.tan, np.tanh,\n np.sqrt)\n\n# empty-case tests\ndef ufuncEmptyCaseTester(ufunc):\n def tester(self):\n self.assertTrue(getattr(self.emptyfun, ufunc.__name__)().isempty)\n return tester\n\nfor ufunc in ufuncs:\n _testfun_ = ufuncEmptyCaseTester(ufunc)\n _testfun_.__name__ = \"test_emptycase_{}\".format(ufunc.__name__)\n setattr(Ufuncs, _testfun_.__name__, _testfun_)\n\n# TODO: Add more test cases\n# add ufunc tests:\n# (ufunc, [([fun1, interval1], tol1), ([fun2, interval2], tol2), ... 
])\n\nuf1 = lambda x: x\nuf1.__name__ = \"x\"\nuf2 = lambda x: sin(x-.5)\nuf2.__name__ = \"sin(x-.5)\"\nuf3 = lambda x: sin(25*x-1)\nuf3.__name__ = \"sin(25*x-1)\"\n\nufunc_test_params = [\n (np.absolute, [([uf1, (-3,-.5)], eps), ]),\n (np.arccos, [([uf1, (-.8,.8)], eps), ]),\n (np.arccosh, [([uf1, (2,3) ], eps), ]),\n (np.arcsin, [([uf1, (-.8,.8)], eps), ]),\n (np.arcsinh, [([uf1, (2,3) ], eps), ]),\n (np.arctan, [([uf1, (-.8,.8)], eps), ]),\n (np.arctanh, [([uf1, (-.8,.8)], eps), ]),\n (np.cos, [([uf1, (-3,3) ], eps), ]),\n (np.cosh, [([uf1, (-3,3) ], eps), ]),\n (np.exp, [([uf1, (-3,3) ], eps), ]),\n (np.exp2, [([uf1, (-3,3) ], eps), ]),\n (np.expm1, [([uf1, (-3,3) ], eps), ]),\n (np.log, [([uf1, (2,3) ], eps), ]),\n (np.log2, [([uf1, (2,3) ], eps), ]),\n (np.log10, [([uf1, (2,3) ], eps), ]),\n (np.log1p, [([uf1, (-.8,.8)], eps), ]),\n (np.sinh, [([uf1, (-3,3) ], eps), ]),\n (np.sin, [([uf1, (-3,3) ], eps), ]),\n (np.tan, [([uf1, (-.8,.8)], eps), ]),\n (np.tanh, [([uf1, (-3,3) ], eps), ]),\n (np.sqrt, [([uf1, (2,3) ], eps), ]),\n\n (np.cos, [([uf2, (-3,3) ], eps), ]),\n (np.cosh, [([uf2, (-3,3) ], eps), ]),\n (np.exp, [([uf2, (-3,3) ], eps), ]),\n (np.expm1, [([uf2, (-3,3) ], eps), ]),\n (np.sinh, [([uf2, (-3,3) ], eps), ]),\n (np.sin, [([uf2, (-3,3) ], eps), ]),\n (np.tan, [([uf2, (-.8,.8)], eps), ]),\n (np.tanh, [([uf2, (-3,3) ], eps), ]),\n\n (np.cos, [([uf3, (-3,3) ], eps), ]),\n (np.cosh, [([uf3, (-3,3) ], eps), ]),\n (np.exp, [([uf3, (-3,3) ], eps), ]),\n (np.expm1, [([uf3, (-3,3) ], eps), ]),\n (np.sinh, [([uf3, (-3,3) ], eps), ]),\n (np.sin, [([uf3, (-3,3) ], eps), ]),\n (np.tan, [([uf3, (-.8,.8)], eps), ]),\n (np.tanh, [([uf3, (-3,3) ], eps), ]),\n]\n\ndef ufuncTester(ufunc, f, interval, tol):\n ff = Bndfun.initfun_adaptive(f, interval)\n gg = lambda x: ufunc(f(x))\n GG = getattr(ff, ufunc.__name__)()\n def tester(self):\n xx = interval(self.yy)\n vscl = GG.vscale\n lscl = GG.size\n self.assertLessEqual(infnorm(gg(xx)-GG(xx)), vscl*lscl*tol)\n return 
tester\n\nfor (ufunc, [([f, intvl], tol), ]) in ufunc_test_params:\n interval = Interval(*intvl)\n _testfun_ = ufuncTester(ufunc, f, interval, tol)\n _testfun_.__name__ = \\\n \"test_{}_{}_[{:.1f},{:.1f}]\".format(\n ufunc.__name__, f.__name__, *intvl)\n setattr(Ufuncs, _testfun_.__name__, _testfun_)\n\n\nclass Roots(unittest.TestCase):\n\n def test_empty(self):\n ff = Bndfun.initempty()\n self.assertEquals(ff.roots().size, 0)\n\n def test_const(self):\n ff = Bndfun.initconst(0., Interval(-2,3))\n gg = Bndfun.initconst(2., Interval(-2,3))\n self.assertEquals(ff.roots().size, 0)\n self.assertEquals(gg.roots().size, 0)\n\n# add tests for roots\ndef rootsTester(f, interval, roots, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(f, subinterval)\n rts = ff.roots()\n def tester(self):\n self.assertLessEqual(infnorm(rts-roots), tol)\n return tester\n\nrootstestfuns = (\n (lambda x: 3*x+2., [-2,3], np.array([-2/3]), eps),\n (lambda x: x**2+.2*x-.08, [-2,5], np.array([-.4, .2]), 3e1*eps),\n (lambda x: sin(x), [-7,7], pi*np.linspace(-2,2,5), 1e1*eps),\n (lambda x: cos(2*pi*x), [-20,10], np.linspace(-19.75, 9.75, 60), 3e1*eps),\n (lambda x: sin(100*pi*x), [-0.5,0.5], np.linspace(-.5,.5,101), eps),\n (lambda x: sin(5*pi/2*x), [-1,1], np.array([-.8, -.4, 0, .4, .8]), eps)\n )\nfor k, args in enumerate(rootstestfuns):\n _testfun_ = rootsTester(*args)\n _testfun_.__name__ = \"test_roots_{}\".format(k)\n setattr(Roots, _testfun_.__name__, _testfun_)\n\n# reset the testsfun variable so it doesn't get picked up by nose\n_testfun_ = None\n" ]
[ [ "numpy.array", "numpy.linspace", "numpy.random.rand", "matplotlib.pyplot.subplots" ] ]
solderneer/opencv-adventures
[ "20abea930f44296367217145fab73866ea654084" ]
[ "blur.py" ]
[ "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\nimage = cv2.imread('../images/input.jpg')\n\nblur = cv2.blur(image, (3,3))\ngaussian_blur = cv2.GaussianBlur(image, (3,3), 0)\nmedian = cv2.medianBlur(image, 5)\n\ncv2.imshow(\"boxblux\", blur)\ncv2.waitKey()\ncv2.imshow(\"gaussian\", gaussian_blur)\ncv2.waitKey()\ncv2.imshow(\"median\", median)\ncv2.waitKey()\n\n# should go look into image de-noising\n# brighten needs an array [-1,-1,-1],[-1,9,-1],[-1,-1,-1]\n# kernel/convolution matrix can even be used for edge detection\nmatrix = np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])\nsharp = cv2.filter2D(image, -1, matrix)\n\ncv2.imshow(\"sharp\", sharp)\ncv2.waitKey()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.array" ] ]
liuzuxin/RL-Safety-Algorithms
[ "2575225b1ea8ce12e1e13f7a81f8dda7b4189708" ]
[ "tests/test_mean_std.py" ]
[ "import unittest\nimport numpy as np\nimport torch\nfrom rl_safety_algorithms.common.online_mean_std import OnlineMeanStd\nimport rl_safety_algorithms.common.mpi_tools as mpi_tools\n\n\nclass TestOnlineMeanStd(unittest.TestCase):\n \"\"\" Testing the non-MPI version.\n \"\"\"\n\n @staticmethod\n def perform_single_pass(rms, input_shape) -> bool:\n x = torch.from_numpy(np.random.normal(size=input_shape))\n rms(x) # perform one call\n return True\n\n @staticmethod\n def get_data(M, N, epoch):\n \"\"\"Returns data matrix of shape MxN.\"\"\"\n np.random.seed(epoch)\n # start = 10000 + 4 * epoch\n # stop = pid*10000 + M * N + 4 * epoch\n data = np.random.normal(size=(M, N))\n return data\n \n def test_vector_updates(self):\n \"\"\" OnlineMeanStd module is updated with a batch of vector inputs,\n i.e. inputs of shape M x N.\n Note that std dev might differ more than 1e-5 when epochs > 10.\n \"\"\"\n epochs = 20\n T = 500\n obs_shape = (1, )\n\n # === calculation through online updates\n rms = OnlineMeanStd(shape=obs_shape)\n for ep in range(epochs):\n # shape of batch: T x obs_shape\n vector_input = self.get_data(T, obs_shape[0], ep).flatten()\n rms.update(vector_input)\n rms_mean = rms.mean.numpy()\n rms_std = rms.std.numpy()\n\n # ===== calculate ground truths\n obs_list = [self.get_data(T, obs_shape[0], ep) for ep in range(epochs)]\n obs = np.vstack(obs_list)\n gt_mean = np.mean(obs, axis=0)\n gt_std = np.std(obs, axis=0)\n\n self.assertTrue(np.allclose(rms_mean, gt_mean))\n self.assertTrue(np.allclose(rms_std, gt_std, rtol=1e-2))\n self.assertTrue(self.perform_single_pass(rms, obs_shape))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.vstack", "numpy.allclose", "numpy.random.seed", "numpy.random.normal", "numpy.std", "numpy.mean" ] ]
abdelsamea/DeTraC
[ "ab03719b49a1a048f74f08600a6670f6757bbe60" ]
[ "src/frameworks/detrac_torch/feature_composer.py" ]
[ "import tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\nfrom tools.preprocessing import preprocess_images, preprocess_single_image\nfrom tools.kfold import KFold_cross_validation_split\nfrom tools.extraction_and_metrics import extract_features, compute_confusion_matrix\n\nfrom .network import Net\n\nimport torchvision.models as models\nimport torch\n\nimport os\nimport cv2\n\n# Feature composer training\ndef train_feature_composer(\n composed_dataset_path: str,\n epochs: int,\n batch_size: int,\n num_classes: int,\n folds: int,\n lr:float,\n cuda: bool,\n ckpt_dir: str\n):\n \"\"\"\n Feature extractor training.\n\n params:\n <string> composed_dataset_path\n <int> epochs\n <int> batch_size\n <int> num_classes\n <int> folds: Number of folds for KFold cross validation \n <float> lr: Learning rate\n <bool> cuda: Whether to use GPU or not\n <string> ckpt_dir: Model's location\n \"\"\"\n\n # Preprocess images, returning the classes, features and labels\n class_names, x, y = preprocess_images(\n dataset_path=composed_dataset_path, \n width=224, \n height=224, \n num_classes=num_classes, \n framework=\"torch\", \n imagenet=True\n )\n\n # Split data\n X_train, X_test, Y_train, Y_test = KFold_cross_validation_split(\n features=x, \n labels=y, \n n_splits=folds\n )\n\n # Normalize\n X_train /= 255\n X_test /= 255\n\n # Instantiate model\n net = Net(\n models.vgg16(pretrained=True),\n num_classes=num_classes,\n lr=lr,\n cuda=cuda,\n mode=\"feature_composer\",\n ckpt_dir=ckpt_dir,\n labels=class_names\n )\n\n # Train model\n net.fit(\n X_train,\n Y_train,\n X_test,\n Y_test,\n epochs,\n batch_size,\n resume=False\n )\n\n # Confusion matrix\n compute_confusion_matrix(\n y_true=Y_test, \n y_pred=net.infer(X_test), \n framework=\"torch\", \n mode=\"feature_composer\", \n num_classes = num_classes // 2\n )\n\n# Inference\ndef infer(\n ckpt_dir: str, \n ckpt_name: str, \n input_image: str\n) -> dict:\n \"\"\"\n Main inference method.\n\n 
params:\n <string> ckpt_dir: Saved model's directory\n <string> ckpt_name: Saved model's name\n <string> input_image: Image path\n\n returns:\n <dict> Dictionary containing the predictions with their levels of confidence.\n E.g.: {\n COVID19_1:0.10\n COVID19_2:0.15\n ...\n }\n \"\"\"\n ckpt_path = os.path.join(ckpt_dir, ckpt_name)\n num_classes = torch.load(ckpt_path, map_location=lambda storage, loc: storage)[\"num_classes\"]\n \n # Instantiate model\n net = Net(\n models.vgg16(pretrained=True),\n num_classes=num_classes,\n mode=\"feature_composer\",\n ckpt_dir=ckpt_dir\n )\n \n # Load model\n net.load_model_for_inference(os.path.join(ckpt_dir, ckpt_name))\n \n # Check if inputed file is an image.\n assert input_image.lower().endswith(\"png\") or input_image.lower().endswith(\"jpg\") or input_image.lower().endswith(\"jpeg\")\n\n # Preprocess\n img = preprocess_single_image(\n img=input_image, \n width=224, \n height=224, \n imagenet=True, \n framework=\"torch\"\n )\n\n # Return prediction\n return net.infer(img, ckpt_path = os.path.join(ckpt_dir, ckpt_name), use_labels=True)\n" ]
[ [ "torch.load" ] ]
rangsimanketkaew/learning-to-smell
[ "17021a82f7fcdda00536a906dd8dc64cb5663261" ]
[ "metric.py" ]
[ "import tensorflow as tf\n# from tensorflow.python.framework.ops import disable_eager_execution\n# disable_eager_execution()\nfrom tensorflow.keras import backend as K\n\n\ndef jaccard_tensorflow(y_true, y_pred):\n \"\"\"Jaccard score of Tensor in tensorflow for graph mode.\n \"\"\"\n intersection = tf.sets.intersection(y_true[None:], y_pred[None:])\n intersection = tf.sparse.to_dense(intersection)[0]\n union = tf.sets.union(y_true[None:], y_pred[None:])\n union = tf.sparse.to_dense(union)[0]\n return float(len(intersection) / len(union))\n\n\ndef jaccard_tensorflow_eager(y_true, y_pred):\n \"\"\"Jaccard score with built-in function in tensorflow in eager mode.\n \"\"\"\n set1 = set(y_true.numpy())\n set2 = set(y_pred.numpy())\n return float((len(set1.intersection(set2))) / (len(set1.union(set2))))\n\n\ndef jaccard_from_keras_cont(y_true, y_pred):\n \"\"\"Jaccard score for keras.\n Taken directly from https://github.com/keras-team/keras-contrib/blob/master/keras_contrib/losses/jaccard.py\n \"\"\"\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection) / (sum_ - intersection)\n return (1 - jac)\n" ]
[ [ "tensorflow.sets.intersection", "tensorflow.sets.union", "tensorflow.keras.backend.abs", "tensorflow.sparse.to_dense" ] ]
faver2014/InertialNav_Learn
[ "58a0b6db95918e037ed6d08e5d2c8ba2ce388554" ]
[ "code/plot_wind.py" ]
[ "#!/bin/python\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\nimport numpy as np\nimport math\n\n\t# State vector:\n\t# 0-3: quaternions (q0, q1, q2, q3)\n\t# 4-6: Velocity - m/sec (North, East, Down)\n\t# 7-9: Position - m (North, East, Down)\n\t# 10-12: Delta Angle bias - rad (X,Y,Z)\n\t# 13: Accel offset\n\t# 14-15: Wind Vector - m/sec (North,East)\n\t# 16-18: Earth Magnetic Field Vector - milligauss (North, East, Down)\n\t# 19-21: Body Magnetic Field Vector - milligauss (X,Y,Z)\n\t# 22: Terrain\ntry:\n\tdata = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,\n\t\tskip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',\n\t\t'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd', 'dist'])\nexcept ValueError:\n\ttry:\n\t\tdata = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,\n\t\tskip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',\n\t\t'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])\n\texcept ValueError:\n\t\tdata = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,\n\t\t\tskip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',\n\t\t\t'Bx', 'By', 'Bz', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])\n\nfig = plt.figure()\n\nax1 = fig.add_subplot(211)\n\nax1.set_title(\"Wind Velocity\") \nax1.set_xlabel('time (s)')\nax1.set_ylabel('Wind North')\nax1.plot(data['time'], data['Wn'], color='r', label='Wind N')\n\nax2 = fig.add_subplot(212)\n \nax2.set_xlabel('time (s)')\nax2.set_ylabel('Wind East')\nax2.plot(data['time'], data['We'], color='g', label='Wind E')\n\nplt.show()" ]
[ [ "matplotlib.pyplot.figure", "numpy.genfromtxt", "matplotlib.pyplot.show" ] ]
zimo-geek/mindspore
[ "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff" ]
[ "mindspore/python/mindspore/train/callback/_loss_monitor.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"LossMonitor Callback class.\"\"\"\n\nimport numpy as np\nfrom mindspore.common.tensor import Tensor\n\nfrom ._callback import Callback\n\n\nclass LossMonitor(Callback):\n \"\"\"\n Monitor the loss in training.\n\n If the loss is NAN or INF, it will terminate training.\n\n Note:\n If per_print_times is 0, do not print loss.\n\n Args:\n per_print_times (int): How many steps to print once loss. During sink mode, it will print loss in the\n nearest step. 
Default: 1.\n\n Raises:\n ValueError: If per_print_times is not an integer or less than zero.\n \"\"\"\n\n def __init__(self, per_print_times=1):\n super(LossMonitor, self).__init__()\n if not isinstance(per_print_times, int) or per_print_times < 0:\n raise ValueError(\"The argument 'per_print_times' must be int and >= 0, \"\n \"but got {}\".format(per_print_times))\n self._per_print_times = per_print_times\n self._last_print_time = 0\n\n def step_end(self, run_context):\n \"\"\"\n Print training loss at the end of step.\n\n Args:\n run_context (RunContext): Context of the train running.\n \"\"\"\n cb_params = run_context.original_args()\n loss = cb_params.net_outputs\n\n if isinstance(loss, (tuple, list)):\n if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):\n loss = loss[0]\n\n if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):\n loss = float(np.mean(loss.asnumpy()))\n\n cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1\n\n if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):\n raise ValueError(\"epoch: {} step: {}. Invalid loss, terminating training.\".format(\n cb_params.cur_epoch_num, cur_step_in_epoch))\n if self._per_print_times != 0 and (cb_params.cur_step_num - self._last_print_time) >= self._per_print_times:\n self._last_print_time = cb_params.cur_step_num\n print(\"epoch: %s step: %s, loss is %s\" % (cb_params.cur_epoch_num, cur_step_in_epoch, loss), flush=True)\n" ]
[ [ "numpy.isinf", "numpy.isnan" ] ]
How-Wang/onnx
[ "c940fa3fea84948e46603cab2f86467291443beb", "c940fa3fea84948e46603cab2f86467291443beb" ]
[ "onnx/backend/test/case/node/reducemin.py", "onnx/backend/test/case/node/cumsum.py" ]
[ "# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass ReduceMin(Base):\n\n @staticmethod\n def export_do_not_keepdims() -> None:\n shape = [3, 2, 2]\n axes = [1]\n keepdims = 0\n\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n #print(reduced)\n #[[5., 1.]\n # [30., 1.]\n # [55., 1.]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_random')\n\n @staticmethod\n def export_keepdims() -> None:\n shape = [3, 2, 2]\n axes = [1]\n keepdims = 1\n\n node = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n #print(reduced)\n #[[[5., 1.]]\n # [[30., 1.]]\n # [[55., 1.]]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_random')\n\n @staticmethod\n def 
export_default_axes_keepdims() -> None:\n shape = [3, 2, 2]\n axes = None\n keepdims = 1\n\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n #print(reduced)\n #[[[1.]]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_random')\n\n @staticmethod\n def export_negative_axes_keepdims() -> None:\n shape = [3, 2, 2]\n axes = [-2]\n keepdims = 1\n\n node = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n # print(reduced)\n #[[[5., 1.]]\n # [[30., 1.]]\n # [[55., 1.]]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_random')\n", "# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . 
import expect\n\n\nclass CumSum(Base):\n\n @staticmethod\n def export_cumsum_1d() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y']\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([1., 3., 6., 10., 15.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d')\n\n @staticmethod\n def export_cumsum_1d_exclusive() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n exclusive=1\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([0., 1., 3., 6., 10.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_exclusive')\n\n @staticmethod\n def export_cumsum_1d_reverse() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n reverse=1\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([15., 14., 12., 9., 5.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_reverse')\n\n @staticmethod\n def export_cumsum_1d_reverse_exclusive() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n reverse=1,\n exclusive=1\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([14., 12., 9., 5., 0.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_reverse_exclusive')\n\n @staticmethod\n def export_cumsum_2d_axis_0() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n )\n x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\n axis = np.int32(0)\n y = np.array([1., 2., 3., 5., 7., 9.]).astype(np.float64).reshape((2, 3))\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_axis_0')\n\n @staticmethod\n def export_cumsum_2d_axis_1() 
-> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n )\n x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\n axis = np.int32(1)\n y = np.array([1., 3., 6., 4., 9., 15.]).astype(np.float64).reshape((2, 3))\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_axis_1')\n\n @staticmethod\n def export_cumsum_2d_negative_axis() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n )\n x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\n axis = np.int32(-1)\n y = np.array([1., 3., 6., 4., 9., 15.]).astype(np.float64).reshape((2, 3))\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_negative_axis')\n" ]
[ [ "numpy.array", "numpy.minimum.reduce", "numpy.random.uniform", "numpy.random.seed" ], [ "numpy.int32", "numpy.array" ] ]
kritika-srivastava/The-Conurbation-Algorithm
[ "d5d39d701b1e09c975dceca5445c4398fd5fd93b" ]
[ "src/procedural_city_generation/polygons/getBlock.py" ]
[ "from __future__ import division\n\nimport numpy as np\n\nfrom procedural_city_generation.additional_stuff.Singleton import Singleton\nfrom procedural_city_generation.polygons.Polygon2D import Polygon2D\n\nsingleton = Singleton(\"polygons\")\n\n\ndef p_in_poly(poly, point):\n x, y = point\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0][0]\n for i in range(n+1):\n p2x, p2y = poly[i % n][0]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n\n return inside\n\n\ndef getBlock(wedges, vertex_list):\n '''Calculate block to be divided into lots, as well as street polygons'''\n\n old_vertices = [vertex_list[wedge.b] for wedge in wedges]\n old_poly = Polygon2D([v.coords for v in old_vertices])\n\n new_vertices = []\n polylist = []\n last2 = []\n\n for i in range(len(old_vertices)):\n\n # Calculate position of new vertex\n alpha = wedges[i-1].alpha\n a, b, c = old_vertices[i-2], old_vertices[i-1], old_vertices[i]\n v1 = a.coords - b.coords\n v2 = c.coords - b.coords\n n1 = np.array((-v1[1], v1[0]))/np.linalg.norm(v1)\n n2 = np.array((v2[1], -v2[0]))/np.linalg.norm(v2)\n\n # Change lengths of normal vectors depending on whether each\n # edge is a minor road or a main road\n if b.minor_road or a.minor_road:\n n1 *= singleton.minor_factor\n else:\n n1 *= singleton.main_factor\n if b.minor_road or c.minor_road:\n n2 *= singleton.minor_factor\n else:\n n2 *= singleton.main_factor\n\n # Check if current vertex is dead end\n if not 0 - 0.001 < alpha < 0 + 0.001:\n # Not a dead end: move edges which share this vertex\n # inwards along their normal vectors, find intersection\n try:\n intersection = np.linalg.solve(\n np.array(((v1), (v2))).T, (b.coords+n2)-(b.coords+n1))\n except np.linalg.LinAlgError:\n raise Exception(str(v1)+\", \"+str(v2),\n \"angle: \"+str(wedges[i-1].alpha))\n new = b.coords + n1 + 
intersection[0]*v1\n # Check if new vertex is in old polygon\n if p_in_poly(old_poly.edges, new):\n # Append new vertex to lot polygon\n new_vertices.append(new)\n these2 = [b.coords, new]\n if last2:\n street_vertices = last2 + these2\n polylist.append(\n Polygon2D(street_vertices, poly_type=\"road\"))\n last2 = these2[::-1]\n else:\n # New vertex not in polygon, return old polygon as street polygon\n return [old_poly]\n else:\n # Dead end: determine two new vertices by adding the two normals\n # to current vector, then check if these are in old polygon\n new1, new2 = b.coords + n1, b.coords + n2\n if p_in_poly(old_poly.edges, new1) and p_in_poly(old_poly.edges, new2):\n new_vertices += [new1, new2]\n if last2:\n street_vertices = last2 + [b.coords, new1]\n polylist.append(\n Polygon2D(street_vertices, poly_type=\"road\"))\n street_vertices = [b.coords, new2, new1]\n polylist.append(\n Polygon2D(street_vertices, poly_type=\"road\"))\n last2 = [new2, b.coords]\n\n else:\n old_poly.poly_type = \"road\"\n return [old_poly]\n street_vertices = last2 + [old_vertices[-1].coords, new_vertices[0]]\n polylist.append(Polygon2D(street_vertices, poly_type=\"road\"))\n\n # All new vertices are in old polygon: append block polygon\n block_poly = Polygon2D(new_vertices)\n if block_poly.area < singleton.max_area:\n block_poly.poly_type = \"lot\"\n polylist.append(block_poly)\n return polylist\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import construct_polygons as cp\n polys, vertices = cp.main()\n for p in getBlock(polys[1], vertices):\n p.selfplot()\n plt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.show", "numpy.linalg.norm" ] ]
ryan-dd/autonomous-systems
[ "39fa1394e6b9577600e52d9b7ecd9184a1c90ce1" ]
[ "extended_kalman_filter/extended_kalman_filter.py" ]
[ "from math import cos, sin, atan2, exp\n\nimport numpy as np\n\nfrom heading_range_robot.parameters import *\n\n\nclass EKF:\n def __init__(self, sample_period):\n self._change_t = sample_period\n self.mean_belief = np.vstack((INITIAL_X, INITIAL_Y, INITIAL_THETA))\n self.covariance_belief = np.eye(3)\n self.Qt = np.eye(2)*np.vstack((STD_DEV_LOCATION_RANGE**2, STD_DEV_LOCATION_BEARING**2))\n self.all_features = np.vstack((LANDMARK_1_LOCATION, LANDMARK_2_LOCATION, LANDMARK_3_LOCATION))\n\n def prediction_step(self, theta_prev, vc, wc):\n change_t = self._change_t\n theta = theta_prev\n # Jacobian of ut at xt-1\n Gt = np.array([\n [1, 0, -vc/wc*cos(theta) + vc/wc*cos(theta + wc*change_t)],\n [0, 1, -vc/wc*sin(theta) + vc/wc*sin(theta + wc*change_t)],\n [0, 0, 1]])\n # Jacobian to map noise in control space to state space\n Vt = np.array([\n [(-sin(theta) + sin(theta + wc*change_t))/wc, vc*(sin(theta)-sin(theta + wc*change_t))/(wc**2) + (vc*cos(theta + wc*change_t)*change_t)/wc],\n [(-cos(theta) + cos(theta + wc*change_t))/wc, vc*(cos(theta)-cos(theta + wc*change_t))/(wc**2) + (vc*sin(theta + wc*change_t)*change_t)/wc],\n [0, change_t]])\n\n Mt = np.array([\n [ALPHA1*vc**2 + ALPHA2*wc**2, 0],\n [0, ALPHA3*vc**2 + ALPHA4*wc**2]\n ])\n\n self.mean_belief = self.mean_belief + np.array([\n [-vc/wc*sin(theta) + vc/wc*sin(theta + wc*change_t)],\n [vc/wc*cos(theta) - vc/wc*cos(theta + wc*change_t)],\n [wc*change_t]\n ])\n\n self.covariance_belief = Gt @ self.covariance_belief @ Gt.T + Vt @ Mt @ Vt.T\n\n def measurement_step(self, true_state):\n Qt = self.Qt\n for feature in self.all_features:\n f_x = feature[0]\n f_y = feature[1]\n mean_x = self.mean_belief[0]\n mean_y = self.mean_belief[1]\n mean_theta = self.mean_belief[2]\n # Range and bearing from mean belief\n q = (f_x - mean_x)**2 + (f_y - mean_y)**2\n zti = np.array([\n [np.sqrt(q)],\n [np.arctan2((f_y - mean_y), (f_x - mean_x)) - mean_theta]]).reshape((2,1))\n measurement = simulate_measurement(true_state, f_x, 
f_y)\n\n Ht = np.array([\n [-(f_x - mean_x)/np.sqrt(q), -(f_y - mean_y)/np.sqrt(q), np.array([0])],\n [(f_y - mean_y)/q, -(f_x - mean_x)/q, np.array([-1])]]).reshape((2,3))\n covariance_belief = self.covariance_belief\n mean_belief = self.mean_belief\n St = Ht @ covariance_belief @ Ht.T + Qt\n Kt = covariance_belief @ Ht.T @ np.linalg.inv(St)\n self.mean_belief = mean_belief + Kt @ (measurement - zti)\n self.covariance_belief = (np.eye(len(Kt)) - Kt @ Ht) @ covariance_belief\n self.kt = Kt\n #pzt = np.linalg.det(2*pi*St)**(-1/2) @ exp(-1/2*(zti - measurement[index]).T @ np.linalg.inv(St) @ (zti - measurement[index]))\n\ndef simulate_measurement(true_state, f_x, f_y):\n true_x = true_state[0]\n true_y = true_state[1]\n true_theta = true_state[2]\n q = (f_x - true_x)**2 + (f_y - true_y)**2\n zt = np.array([\n [np.sqrt(q)],\n [np.arctan2((f_y - true_y), (f_x - true_x)) - true_theta]]).reshape((2,1))\n return zt + np.vstack((range_measurement_noise(), bearing_measurement_noise()))\n\ndef range_measurement_noise():\n return np.random.normal(0, STD_DEV_LOCATION_RANGE)\n\ndef bearing_measurement_noise():\n return np.random.normal(0, STD_DEV_LOCATION_BEARING)" ]
[ [ "numpy.vstack", "numpy.eye", "numpy.sqrt", "numpy.arctan2", "numpy.linalg.inv", "numpy.random.normal", "numpy.array" ] ]
stoman/CompetitiveProgramming
[ "0000b64369b50e31c6f48939e837bdf6cece8ce4" ]
[ "problems/predictingofficespaceprice/submissions/accepted/stefan2.py" ]
[ "#!/usr/bin/env python2\n\n#Author: Stefan Toman\n\nimport itertools\nimport numpy as np\nfrom operator import mul\nfrom sklearn.linear_model import LinearRegression\n\nif __name__ == '__main__':\n #read input\n f, n = map(int, raw_input().split())\n X = []\n y = []\n for _ in range(n):\n line = raw_input().split()\n X.append([float(x) for x in line[:-1]])\n y.append([float(line[-1])])\n q = int(raw_input())\n Xt = []\n for _ in range(q):\n Xt.append([float(x) for x in raw_input().split()])\n #add new features as monomials of degree <= 3\n X = np.array(X)\n Xt = np.array(Xt)\n for i in range(2, 4):\n for var in itertools.product(range(f), repeat=i):\n X = np.hstack((X, reduce(mul, [X[:, j] for j in var]).reshape(-1, 1)))\n Xt = np.hstack((Xt, reduce(mul, [Xt[:, j] for j in var]).reshape(-1, 1)))\n #use sklearn to compute output\n for yt in LinearRegression().fit(X, y).predict(Xt):\n print(yt[0])\n " ]
[ [ "numpy.array", "sklearn.linear_model.LinearRegression" ] ]
JoseLuisRojasAranda/tfmodels
[ "56dce0236f0cc03dd7031aecf305d470c9fb97a9" ]
[ "src/datasets/Fruits360/f360_dataset.py" ]
[ "import tensorflow as tf\nimport cv2\nfrom glob import glob\nimport sys\nimport os\nfrom os import path\nimport json\nimport random\n\nfrom datasets.datasets_features import bytes_feature\n\n# Metodo que regresa el dataset de f360 ya procesado a tfrecord\n# Los data set tiene el formato:\n# x: tensor con la imagen normalizada\n# y: tensor con onehot encoding de la categoria\n# Returns:\n# train_data: Dataset de entrenameinto\n# test_data: Dataset de pruebas\ndef f360_load_dataset(path=None, resize=None, num_classes=None):\n train_path = \"f360_train.tfrecord\"\n test_path = \"f360_test.tfrecord\"\n\n if path == None:\n path = \"\"\n\n train_raw_data = tf.data.TFRecordDataset(path+train_path)\n test_raw_data = tf.data.TFRecordDataset(path+test_path)\n\n _format = {\n \"x\": tf.io.FixedLenFeature([], tf.string),\n \"y\": tf.io.FixedLenFeature([], tf.string)\n }\n\n def _parse_example(example):\n ex = tf.io.parse_single_example(example, _format)\n x = tf.io.parse_tensor(ex[\"x\"], tf.float32)\n y = tf.io.parse_tensor(ex[\"y\"], tf.float32)\n y = tf.reshape(y, [-1])\n\n data_dict = {\n \"x\": x,\n \"y\": y\n }\n\n return x, y\n\n train_data = train_raw_data.map(_parse_example)\n test_data = test_raw_data.map(_parse_example)\n\n def _set_dataset_shape(x, y):\n x.set_shape([100, 100, 3])\n\n return x, y\n\n train_data = train_data.map(_set_dataset_shape)\n test_data = test_data.map(_set_dataset_shape)\n\n if resize != None:\n def _resize_dataset(x, y):\n x = tf.image.resize(x, [resize, resize])\n\n return x, y\n\n train_data = train_data.map(_resize_dataset)\n test_data = test_data.map(_resize_dataset)\n\n with open(path+\"dataset_info.json\", \"r\") as data:\n info = json.load(data)\n\n\n return train_data, test_data, info\n\n# Metodo que convierte el dataset de Fruits 360 a tfrecord, para despues usarlo\n# con el Dataset API de tensorflow\n# Args:\n# training_path: el path al dataset de training\n# test_path: el path al dataset de pruebas\n# num_imgs: numero de 
images a obtener, -1 para todas\n# result_path: el path donde se guarda el resultado\ndef f360_create_dataset(training_path=None, test_path=None, num_imgs=-1,\n result_path=None, delta=1, offset=0):\n # Crea la carpeta por si no existe donde se va a guardar el resultado\n if not path.exists(result_path):\n os.makedirs(result_path)\n\n process_cats = [\"Apple Golden 1\", \"Banana\", \"Orange\"]\n \"\"\"\n process_cats = [\"Apple Braeburn\", \"Apple Golden 1\", \"Avocado\", \"Lemon\",\n \"Limes\", \"Lychee\", \"Mandarine\", \"Banana\", \"Onion White\", \"Onion White\",\n \"Pear\", \"Orange\", \"Pineapple\", \"Potato White\", \"Strawberry\", \"Tomato 4\"]\n \"\"\"\n\n onehot_depth = len(process_cats)\n onehot_dict = { }\n for i in range(len(process_cats)):\n cat = process_cats[i]\n onehot_dict[cat] = i\n\n # Obtiene todas las categorias que existen\n cats = [x[1] for x in os.walk(training_path)][0]\n\n # Writer al tfrecord\n train_writer = tf.io.TFRecordWriter(result_path+\"f360_train.tfrecord\")\n test_writer = tf.io.TFRecordWriter(result_path+\"f360_test.tfrecord\")\n\n train_size = 0\n test_size = 0\n total_train_size = 0\n total_test_size = 0\n\n categories_size = { }\n\n # funcion que escribe una imagen al tfrecord\n def encode_image_info(image, category, writer):\n # Convierte la imagen a un tensor y lo normaliza \n image_tensor = tf.convert_to_tensor(image)\n image_tensor /= 255\n\n category = tf.one_hot([onehot_dict[category]], onehot_depth)\n\n # Genera los features para el example\n data = {\n \"x\": bytes_feature(tf.io.serialize_tensor(image_tensor)),\n \"y\": bytes_feature(tf.io.serialize_tensor(category))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=data))\n writer.write(example.SerializeToString())\n\n print(\"[INFO] Writing dataset to tfrecord\")\n # itera sobre todas las categorias a procesar\n for cat in process_cats:\n # si la categoria existe\n if cat in cats:\n print(\"[INFO] Writing {}...\".format(cat))\n train_size = 
test_size = 0\n # obtiene los paths\n train_img_path = glob(training_path+cat+\"/*.jpg\")\n test_img_path = glob(test_path+cat+\"/*.jpg\")\n\n # Ordena los paths\n train_img_path = sorted(train_img_path)\n test_img_path = sorted(test_img_path)\n\n # el numero de imagenes a que se van a ciclar\n n_train = n_test = num_imgs\n if n_train == -1:\n n_train = len(train_img_path)\n n_test = len(test_img_path)\n\n\n i = offset\n j = 0\n total = 0\n # escribe training images\n \"\"\"\n for i in range(n_train):\n img_path = train_img_path[i]\n image = cv2.imread(img_path)\n encode_image_info(image, cat, train_writer)\n train_size += 1\n \"\"\"\n while total < n_train:\n img_path = train_img_path[i]\n image = cv2.imread(img_path)\n encode_image_info(image, cat, train_writer)\n train_size += 1\n #i += random.randint(10, 20)\n i += delta\n if i >= n_train: i = i - n_train\n total += delta\n\n # escribe test images\n for j in range(n_test):\n img_path = test_img_path[j]\n image = cv2.imread(img_path)\n encode_image_info(image, cat, test_writer)\n test_size += 1\n\n categories_size[cat] = (train_size, test_size)\n\n total_train_size += train_size\n total_test_size += test_size\n\n train_writer.close()\n test_writer.close()\n\n dataset_info = {\n \"name\": \"Fruits 360 dataset\",\n \"num_classes\": len(process_cats),\n \"delta\": delta,\n \"offset\": offset,\n \"categories\": process_cats,\n \"train_size\": total_train_size,\n \"test_size\": total_test_size,\n \"categories_size\": categories_size\n }\n\n # Escribe el info del dataset\n with open(result_path+\"dataset_info.json\", \"w\") as writer:\n json.dump(dataset_info, writer, indent=4)\n\n" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.io.parse_single_example", "tensorflow.reshape", "tensorflow.io.TFRecordWriter", "tensorflow.image.resize", "tensorflow.io.FixedLenFeature", "tensorflow.one_hot", "tensorflow.convert_to_tensor", "tensorflow.train.Features", "tensorflow.io.parse_tensor", "tensorflow.io.serialize_tensor" ] ]
tomasstolker/AMICAL
[ "c9bbf8e4a468313efff3b349fffea7648c411a51" ]
[ "amical/_cli/commands/clean.py" ]
[ "import os\nfrom datetime import datetime\nfrom glob import glob\nfrom pathlib import Path\n\nfrom astropy.io import fits\nfrom matplotlib import pyplot as plt\nfrom tabulate import tabulate\nfrom termcolor import cprint\nfrom tqdm import tqdm\n\nimport amical\n\n\ndef _select_data_file(args, process):\n \"\"\"Show report with the data found and allow to select one to be treated.\"\"\"\n l_file = sorted(glob(\"%s/*.fits\" % args.datadir))\n\n if len(l_file) == 0:\n print(\"No fits files found in %s, check --datadir.\" % args.datadir)\n return 1\n\n headers = [\"FILENAME\", \"TARGET\", \"DATE\", \"INSTRUM\", \"INDEX\"]\n\n index_file = []\n d = []\n for i, f in enumerate(l_file):\n with fits.open(f) as hdu:\n hdr = hdu[0].header\n target = hdr.get(\"OBJECT\", None)\n date = hdr.get(\"DATE-OBS\", None)\n ins = hdr.get(\"INSTRUME\", None)\n index_file.append(i)\n filename = f.split(\"/\")[-1]\n d.append([filename, target, date, ins, i])\n\n print(tabulate(d, headers=headers))\n\n if args.file >= 0:\n choosen_index = args.file\n else:\n choosen_index = int(input(\"\\nWhich file to %s?\\n\" % process))\n\n try:\n filename = l_file[choosen_index]\n except IndexError:\n print(\n \"Selected index (%i) not valid (only %i files found).\"\n % (choosen_index, len(l_file))\n )\n raise SystemExit\n else:\n with fits.open(filename) as hdul:\n hdr = hdul[0].header\n return filename, hdr\n\n\ndef perform_clean(args):\n \"\"\"Clean the data with AMICAL.\"\"\"\n cprint(\"---- AMICAL clean process ----\", \"cyan\")\n\n clean_param = {\n \"isz\": args.isz,\n \"r1\": args.r1,\n \"dr\": args.dr,\n \"apod\": args.apod,\n \"window\": args.window,\n \"f_kernel\": args.kernel,\n }\n\n if not os.path.exists(args.datadir):\n print(\n \"%s directory not found, check --datadir. 
AMICAL look for data only in this specified directory.\"\n % args.datadir\n )\n return 1\n\n l_file = sorted(glob(\"%s/*.fits\" % args.datadir))\n if len(l_file) == 0:\n print(\"No fits files found in %s, check --datadir.\" % args.datadir)\n return 1\n\n if not args.all:\n filename, hdr = _select_data_file(args, process=\"clean\")\n\n if args.check:\n amical.show_clean_params(filename, **clean_param)\n plt.show(block=True)\n return 0\n\n if not os.path.exists(args.outdir):\n os.mkdir(args.outdir)\n\n clean_param[\"clip\"] = args.clip\n clean_param[\"sky\"] = args.sky\n\n if args.all:\n # Clean all files in --datadir\n for f in tqdm(l_file, ncols=100, desc=\"# files\"):\n hdr = fits.open(f)[0].header\n hdr[\"HIERARCH AMICAL step\"] = \"CLEANED\"\n cube = amical.select_clean_data(f, **clean_param, display=True)\n f_clean = os.path.join(args.outdir, Path(f).stem + \"_cleaned.fits\")\n fits.writeto(f_clean, cube, header=hdr, overwrite=True)\n else:\n # Or clean just the specified file (in --datadir)\n hdr[\"HIERARCH AMICAL step\"] = \"CLEANED\"\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n hdr[\"HIERARCH AMICAL time\"] = dt_string\n for k in clean_param:\n hdr[\"HIERARCH AMICAL params %s\" % k] = clean_param[k]\n cube = amical.select_clean_data(filename, **clean_param, display=True)\n if args.plot:\n plt.show()\n f_clean = os.path.join(args.outdir, Path(filename).stem + \"_cleaned.fits\")\n fits.writeto(f_clean, cube, header=hdr, overwrite=True)\n return 0\n" ]
[ [ "matplotlib.pyplot.show" ] ]
kristianwiklund/AOC2019
[ "a98affaccd53ca4ea2d3a8c3fa125680f1e8cc08" ]
[ "2018/7/7.py" ]
[ "import networkx as nx\nimport matplotlib.pyplot as plt\n\nG = nx.DiGraph()\n\n#with open (\"shortinput.txt\") as fd:\nwith open (\"input.txt\") as fd:\n\n for line in fd:\n x = line.split(\" \")\n before = x[1]\n after = x[7]\n G.add_edge(before, after, weight=ord(after)-64)\n\nnx.draw(G, with_labels=True)\nplt.savefig(\"maze.png\")\nhelalistan=list(nx.lexicographical_topological_sort(G))\nprint(\"7A :\"+\"\".join(helalistan))\n\n# ---------------------\n\n#ACHOQRXSEKUGMYIWDZLNBFTJVP\n\ntime=0\nworkers = [0,0,0,0,0,0,0,0,0,0]\ndoing = [None, None,None,None,None,None,None,None,None]\n\n\nwhile list(G.nodes()) != []:\n\n for i in range(0,6):\n\n if workers[i] <= 0:\n # finish what was done, then pull something\n if doing[i]:\n# print (\"Worker \"+str(i)+\" is done with \"+doing[i])\n G.remove_node(doing[i])\n doing[i] = None\n \n for j in helalistan:\n #print (\"Trying to pull node \"+j)\n if not j in doing:\n #print (\"Nobody is working on \"+j)\n if G.has_node(j) and list(G.predecessors(j)) == []:\n # print (\"Worker \"+str(i)+\" pulls node \"+j)\n doing[i] = j\n workers[i] = 60+ord(j)-65\n break\n \n else:\n workers[i] = workers[i] - 1\n\n # print(\"Tick: \"+str(time) + \" working on \"+str(doing))\n time=time+1\n\n\nprint(\"Total time for assembly: \"+str(time-1))\n\n \n \n \n \n \n" ]
[ [ "matplotlib.pyplot.savefig" ] ]
WadhwaniAI/ESTRNN
[ "4af8d53b0ebb1655c40aaf4f6950904580a34aa2" ]
[ "data/anthro.py" ]
[ "import os\nimport random\nfrom os.path import join, basename, dirname\n\nimport cv2\nimport numpy as np\nimport torch\nfrom glob import glob\nimport ipdb\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\n\nfrom utils import normalize, Crop, Flip, ToTensor\n\n\nclass AnthroDeblurDataset(Dataset):\n \"\"\"\n Structure of self_.records:\n seq:\n frame:\n path of images -> {'Blur': <path>, 'Sharp': <path>}\n \"\"\"\n\n def __init__(self, path, frames, future_frames, past_frames, crop_size=(256, 256), data_format='RGB',\n centralize=True, normalize=True):\n assert frames - future_frames - past_frames >= 1\n\n self.frames = frames\n self.num_ff = future_frames\n self.num_pf = past_frames\n self.data_format = data_format\n self.W = None\n self.H = None\n self.crop_h, self.crop_w = crop_size\n self.normalize = normalize\n self.centralize = centralize\n self.transform = transforms.Compose([Crop(crop_size), ToTensor()])\n self._seq_length = 200\n self._samples = self._generate_samples(path, data_format)\n\n def _generate_samples(self, dataset_path, data_format):\n samples = list()\n records = dict()\n seq = basename(dataset_path)\n\n records[seq] = list()\n frames = sorted(glob(join(dataset_path, '*.jpg')))\n for frame in frames[:self._seq_length]:\n sample = dict()\n sample['Blur'] = frame\n sample['Sharp'] = frame\n records[seq].append(sample)\n\n self.H, self.W, _ = cv2.imread(frame).shape\n\n\n for seq_records in records.values():\n temp_length = len(seq_records) - (self.frames - 1)\n if temp_length <= 0:\n raise IndexError('Exceed the maximum length of the video sequence')\n for idx in range(temp_length):\n samples.append(seq_records[idx:idx + self.frames])\n\n\n return samples\n\n def __getitem__(self, item):\n top = random.randint(0, self.H - self.crop_h)\n left = random.randint(0, self.W - self.crop_w)\n flip_lr = random.randint(0, 1)\n flip_ud = random.randint(0, 1)\n sample = {'top': top, 'left': left, 'flip_lr': flip_lr, 
'flip_ud': flip_ud}\n\n blur_imgs, sharp_imgs = [], []\n for sample_dict in self._samples[item]:\n blur_img, sharp_img = self._load_sample(sample_dict, sample)\n blur_imgs.append(blur_img)\n sharp_imgs.append(sharp_img)\n sharp_imgs = sharp_imgs[self.num_pf:self.frames - self.num_ff]\n return [torch.cat(item, dim=0) for item in [blur_imgs, sharp_imgs]]\n\n def _load_sample(self, sample_dict, sample):\n if self.data_format == 'RGB':\n sample['image'] = cv2.imread(sample_dict['Blur'])\n sample['label'] = cv2.imread(sample_dict['Sharp'])\n else:\n raise NotImplementedError\n # elif self.data_format == 'RAW':\n # sample['image'] = cv2.imread(sample_dict['Blur'], -1)[..., np.newaxis].astype(np.int32)\n # sample['label'] = cv2.imread(sample_dict['Sharp'], -1)[..., np.newaxis].astype(np.int32)\n\n\n sample = self.transform(sample)\n val_range = 2.0 ** 8 - 1 if self.data_format == 'RGB' else 2.0 ** 16 - 1\n blur_img = normalize(sample['image'], centralize=self.centralize, normalize=self.normalize, val_range=val_range)\n sharp_img = normalize(sample['label'], centralize=self.centralize, normalize=self.normalize, val_range=val_range)\n\n return blur_img, sharp_img\n\n def __len__(self):\n return len(self._samples)\n\n\nclass Dataloader:\n def __init__(self, para, device_id, ds_type='train'):\n path = join(para.data_root, para.dataset)\n frames = para.frames\n dataset = AnthroDeblurDataset(path, frames, para.future_frames, para.past_frames, para.patch_size, para.data_format,\n para.centralize, para.normalize)\n gpus = para.num_gpus\n bs = para.batch_size\n ds_len = len(dataset)\n if para.trainer_mode == 'ddp':\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset,\n num_replicas=para.num_gpus,\n rank=device_id\n )\n self.loader = DataLoader(\n dataset=dataset,\n batch_size=para.batch_size,\n shuffle=False,\n num_workers=para.threads,\n pin_memory=True,\n sampler=sampler,\n drop_last=True\n )\n loader_len = np.ceil(ds_len / gpus)\n self.loader_len = 
int(np.ceil(loader_len / bs) * bs)\n\n elif para.trainer_mode == 'dp':\n self.loader = DataLoader(\n dataset=dataset,\n batch_size=para.batch_size,\n shuffle=True,\n num_workers=para.threads,\n pin_memory=True,\n drop_last=True\n )\n self.loader_len = int(np.ceil(ds_len / bs) * bs)\n\n def __iter__(self):\n return iter(self.loader)\n\n def __len__(self):\n return self.loader_len\n\n\nif __name__ == '__main__':\n from para import Parameter\n\n para = Parameter().args\n para.data_format = 'RGB'\n para.data_root = '/home/users/aditya/projects/ESTRNN/data/'\n para.dataset = 'anthro/358129091084785_19032020105115/video_1_baby_chessboard_ruler_4046788426114666387/'\n dataloader = Dataloader(para, 0)\n for x, y in dataloader:\n print(x.shape, y.shape)\n break\n print(x.type(), y.type())\n print(np.max(x.numpy()), np.min(x.numpy()))\n print(np.max(y.numpy()), np.min(y.numpy()))\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cat", "numpy.ceil", "torch.utils.data.distributed.DistributedSampler" ] ]
nagomiso/komono
[ "3158dc14ebaee724defe63d54c214d40065558d7" ]
[ "tests/test_reduce_memory.py" ]
[ "import pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal, assert_series_equal\n\nimport komono.pandas._reduce_memory as rd\n\n\[email protected]\ndef base_data():\n return {\n \"int8\": [-128, 127],\n \"int16\": [-129, 127],\n \"Int8\": [None, 127],\n \"Str\": [\"foo\", \"bar\"],\n }\n\n\[email protected]\ndef base_dtype():\n return {\n \"int8\": \"int64\",\n \"int16\": \"int64\",\n \"Int8\": \"Int64\",\n \"Str\": \"string\",\n }\n\n\[email protected]\ndef base_dataframe(base_data, base_dtype) -> pd.DataFrame:\n return pd.DataFrame.from_dict(base_data).astype(base_dtype)\n\n\[email protected](\n \"min_,max_,expected_dtype\",\n [\n (-128, 127, \"int8\"),\n (-128, 128, \"int16\"),\n (-129, 127, \"int16\"),\n (-129, 128, \"int16\"),\n (-32_768, 32_767, \"int16\"),\n (-32_768, 32_768, \"int32\"),\n (-32_769, 32_767, \"int32\"),\n (-32_769, 32_768, \"int32\"),\n (-2_147_483_648, 2_147_483_647, \"int32\"),\n (-2_147_483_648, 2_147_483_648, \"int64\"),\n (-2_147_483_649, 2_147_483_647, \"int64\"),\n (-2_147_483_649, 2_147_483_648, \"int64\"),\n ],\n)\ndef test_reduce_integer_series_not_nullable(min_, max_, expected_dtype):\n series = pd.Series([min_, max_], dtype=\"int64\")\n dtype = str(series.dtype)\n expected = pd.Series([min_, max_], dtype=expected_dtype)\n actual = rd._reduce_integer_series(series, dtype=dtype)\n assert_series_equal(actual, expected)\n\n\[email protected](\n \"min_,mid,max_,expected_dtype\",\n [\n (-128, None, 127, \"Int8\"),\n (-128, None, 128, \"Int16\"),\n (-129, None, 127, \"Int16\"),\n (-129, None, 128, \"Int16\"),\n (-32_768, None, 32_767, \"Int16\"),\n (-32_768, None, 32_768, \"Int32\"),\n (-32_769, None, 32_767, \"Int32\"),\n (-32_769, None, 32_768, \"Int32\"),\n (-2_147_483_648, None, 2_147_483_647, \"Int32\"),\n (-2_147_483_648, None, 2_147_483_648, \"Int64\"),\n (-2_147_483_649, None, 2_147_483_647, \"Int64\"),\n (-2_147_483_649, None, 2_147_483_648, \"Int64\"),\n ],\n)\ndef 
test_reduce_integer_series_nullable(min_, mid, max_, expected_dtype):\n series = pd.Series([min_, mid, max_], dtype=\"Int64\")\n dtype = str(series.dtype)\n expected = pd.Series([min_, mid, max_], dtype=expected_dtype)\n actual = rd._reduce_integer_series(series, dtype=dtype)\n assert_series_equal(actual, expected)\n\n\[email protected](\n \"min_,max_,expected_dtype\",\n [\n (-65500.0, 65500.0, \"float16\"),\n (-65500.0, 65600.0, \"float32\"),\n (-65600.0, 65500.0, \"float32\"),\n (-65600.0, 65600.0, \"float32\"),\n (-3.4028e38, 3.4028e38, \"float32\"),\n (-3.4028235e38, 3.4028335e38, \"float64\"),\n (-3.4028335e38, 3.4028235e38, \"float64\"),\n (-3.4028335e38, 3.4028335e38, \"float64\"),\n ],\n)\ndef test_reduce_float_series(min_, max_, expected_dtype):\n series = pd.Series([min_, max_], dtype=\"float64\")\n expected = pd.Series([min_, max_], dtype=expected_dtype)\n actual = rd._reduce_float_series(series)\n assert_series_equal(actual, expected)\n\n\ndef test_reduce_memory_usage(base_data, base_dataframe):\n expected = pd.DataFrame.from_dict(data=base_data,).astype(\n {\n \"int8\": \"int8\",\n \"int16\": \"int16\",\n \"Int8\": \"Int8\",\n \"Str\": \"string\",\n }\n )\n actual = rd.reduce_memory_usage(base_dataframe, verbose=True)\n assert_frame_equal(actual, expected)\n" ]
[ [ "pandas.testing.assert_series_equal", "pandas.Series", "pandas.testing.assert_frame_equal", "pandas.DataFrame.from_dict" ] ]
swyang50066/sun-jupiter-earth-orbit
[ "c50012ff1a187485b717d86a24c25cfe6edd78a1" ]
[ "source/force.py" ]
[ "import numpy as np\n\nfrom allvar import *\n\n\ndef _distance(r1, r2):\n \"\"\"Return Euclidean _distance between positions\"\"\"\n return np.sqrt(np.sum((r1 - r2)**2.))\n\n\ndef drdt(r, v):\n \"\"\"Return position derivative\n\n :param r: shape: (x_earth, y_earth, x_jupiter, y_jupiter))\n :param v: shape: (vx_earth, vy_earth, vx_jupiter, vy_jupiter) \n :return: velocities\n \"\"\"\n return v\n\n\ndef dvdt(r, v, eps=1.e-20):\n \"\"\"Return position derivative\n\n Central star have fixed position at (0, 0)\n\n :param r: shape: (x_earth, y_earth, x_jupiter, y_jupiter)\n :param v: shape: (vx_earth, vy_earth, vx_jupiter, vy_jupiter)\n :return: accelerations\n \"\"\"\n # Geometric measurements\n r_se, r_sj, r_ej = r[:2], r[2:], r[2:] - r[:2]\n dist_se = _distance((0, 0), r_se)\n dist_sj = _distance((0, 0), r_sj)\n dist_ej = _distance(r_se, r_sj)\n\n theta_se = np.math.atan(np.abs(r_se[1])/(np.abs(r_se[0]) + eps))\n theta_sj = np.math.atan(np.abs(r_sj[1])/(np.abs(r_sj[0]) + eps))\n theta_ej = np.math.atan(np.abs(r_ej[1])/(np.abs(r_ej[0]) + eps))\n \n # Unit force functionals\n const_se = GG*(EARTH_MASS/SOLAR_MASS)\n f_se = -np.sign(r_se)*const_se*np.array(\n [ \n np.cos(theta_se)/(dist_se + eps)**2.,\n np.sin(theta_se)/(dist_se + eps)**2.\n ]\n )\n const_sj = GG*(JUPITER_MASS/SOLAR_MASS)\n f_sj = -np.sign(r_sj)*const_sj*np.array(\n [\n np.cos(theta_sj)/(dist_sj + eps)**2.,\n np.sin(theta_sj)/(dist_sj + eps)**2.\n ]\n )\n const_ej = GG*(EARTH_MASS*JUPITER_MASS/SOLAR_MASS**2.)\n f_ej = -np.sign(r_ej)*const_ej*np.array(\n [\n np.cos(theta_ej)/(dist_ej + eps)**2.,\n np.sin(theta_ej)/(dist_ej + eps)**2.\n ]\n )\n \n return np.hstack([\n (f_se - f_ej)/(EARTH_MASS/SOLAR_MASS),\n (f_sj + f_ej)/(JUPITER_MASS/SOLAR_MASS),\n ])\n" ]
[ [ "numpy.sum", "numpy.sign", "numpy.abs", "numpy.cos", "numpy.hstack", "numpy.sin" ] ]
haojiepan1/CrossWOZ
[ "6d7b4c4cfb73a528b76074764687906abecc90b6" ]
[ "tests/test_end2end.py" ]
[ "from convlab2.nlu.svm.multiwoz import SVMNLU\nfrom convlab2.nlu.jointBERT.multiwoz import BERTNLU\nfrom convlab2.nlu.milu.multiwoz import MILU\nfrom convlab2.dst.rule.multiwoz import RuleDST\nfrom convlab2.policy.rule.multiwoz import RulePolicy\nfrom convlab2.nlg.template.multiwoz import TemplateNLG\nfrom convlab2.dialog_agent import PipelineAgent, BiSession\nfrom convlab2.evaluator.multiwoz_eval import MultiWozEvaluator\nfrom pprint import pprint\nimport random\nimport numpy as np\nimport torch\n\nsys_nlu = BERTNLU(mode='all', config_file='multiwoz_all.json',\n model_file='https://tatk-data.s3-ap-northeast-1.amazonaws.com/bert_multiwoz_all.zip')\n# sys_nlu = SVMNLU(mode='sys')\n# simple rule DST\nsys_dst = RuleDST()\n# rule policy\nsys_policy = RulePolicy(character='sys')\n# template NLG\nsys_nlg = TemplateNLG(is_user=False)\n# assemble\nsys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, 'sys')\n\n# user_nlu = sys_nlu\n# user_nlu = SVMNLU(mode='all')\nuser_nlu = MILU(model_file=\"https://convlab.blob.core.windows.net/models/milu.tar.gz\")\n# not use dst\nuser_dst = None\n# rule policy\nuser_policy = RulePolicy(character='usr')\n# template NLG\nuser_nlg = TemplateNLG(is_user=True)\n# assemble\nuser_agent = PipelineAgent(user_nlu, None, user_policy, user_nlg, 'user')\n\nevaluator = MultiWozEvaluator()\nsess = BiSession(sys_agent=sys_agent, user_agent=user_agent, kb_query=None, evaluator=evaluator)\n\nrandom.seed(20200131)\nnp.random.seed(20190827)\ntorch.manual_seed(20200131)\nsys_response = ''\nsess.init_session()\nprint('init goal:')\npprint(sess.evaluator.goal)\nprint('-'*50)\nfor i in range(40):\n sys_response, user_response, session_over, reward = sess.next_turn(sys_response)\n print('user:', user_response)\n print('sys:', sys_response)\n print()\n if session_over is True:\n print('task complete:', user_policy.policy.goal.task_complete())\n print('task success:', sess.evaluator.task_success())\n print('book rate:', sess.evaluator.book_rate())\n 
print('inform precision/recall/f1:', sess.evaluator.inform_F1())\n print('-'*50)\n print('final goal:')\n pprint(sess.evaluator.goal)\n print('='*100)\n break\n\ntotal_dialog = 10\nrandom.seed(20200131)\ngoal_seeds = [random.randint(1,100000) for _ in range(total_dialog)]\nprecision = 0\nrecall = 0\nf1 = 0\nsuc_num = 0\ncomplete_num = 0\nfor j in range(total_dialog):\n sys_response = ''\n random.seed(goal_seeds[0])\n np.random.seed(goal_seeds[0])\n torch.manual_seed(goal_seeds[0])\n goal_seeds.pop(0)\n sess.init_session()\n # print('init goal:')\n # pprint(sess.evaluator.goal)\n # print('-'*50)\n for i in range(40):\n sys_response, user_response, session_over, reward = sess.next_turn(\n sys_response)\n # print('user:', user_response)\n # print('sys:', sys_response)\n if session_over is True:\n if sess.evaluator.task_success() == 1:\n suc_num = suc_num+1\n if user_policy.policy.goal.task_complete():\n complete_num += 1\n print('task complete:', user_policy.policy.goal.task_complete())\n print('task success:', sess.evaluator.task_success())\n print('book rate:', sess.evaluator.book_rate())\n print('inform precision/recall/f1:', sess.evaluator.inform_F1())\n stats = sess.evaluator.inform_F1()\n if(stats[0] != None):\n precision = precision+stats[0]\n if(stats[1] != None):\n recall = recall+stats[1]\n if(stats[2] != None):\n f1 = f1+stats[2]\n else:\n suc_num = suc_num-1\n # print('-'*50)\n # print('final goal:')\n # pprint(sess.evaluator.goal)\n # print('='*100)\n break\nprint(\"complete number of dialogs/tot:\", complete_num/total_dialog)\nprint(\"success number of dialogs/tot:\", suc_num/total_dialog)\nprint(\"average precision:\", precision/total_dialog)\nprint(\"average recall:\", recall/total_dialog)\nprint(\"average f1:\", f1/total_dialog)" ]
[ [ "torch.manual_seed", "numpy.random.seed" ] ]
HenryJia/lightning-baselines3
[ "10d1a0eed6136978204323250e37d49915a12e14" ]
[ "tests/on_policy_models/test_on_policy_model.py" ]
[ "from collections import OrderedDict\n\nimport pytest\n\nimport gym\nfrom gym import spaces\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch import distributions\n\nimport pytorch_lightning as pl\n\nfrom lightning_baselines3.on_policy_models.on_policy_model import OnPolicyModel\n\n\n\nclass DummyModel(OnPolicyModel):\n def __init__(self, *args, **kwargs):\n super(DummyModel, self).__init__(*args, **kwargs)\n\n if isinstance(self.action_space, spaces.Discrete):\n self.p = nn.Parameter(torch.ones(1, self.action_space.n) * 0.5)\n elif isinstance(self.action_space, spaces.Box):\n self.p = nn.Parameter(torch.ones(1, self.action_space.shape[0] * 2) * 0.5)\n else:\n raise Exception('Incompatible environment action space')\n\n\n def forward(self, x, **kwargs):\n p = self.p.expand(x.shape[0], self.p.shape[-1])\n if isinstance(self.action_space, spaces.Discrete):\n dist = distributions.Categorical(probs=F.softmax(p, dim=1))\n elif isinstance(self.action_space, spaces.Box):\n p = torch.chunk(p, 2, dim=1)\n dist = distributions.Normal(loc=p[0], scale=1 + p[1] ** 2)\n return dist, torch.ones_like(x)[:, :1]\n\n\n def predict(self, x, deterministic=True):\n p = self.p.expand(x.shape[0], self.p.shape[-1])\n if deterministic:\n if isinstance(self.action_space, spaces.Discrete):\n out = torch.max(p, dim=1)[1]\n elif isinstance(self.action_space, spaces.Box):\n out = torch.chunk(p, 2, dim=1)[0]\n else:\n if isinstance(self.action_space, spaces.Discrete):\n out = distributions.Categorical(probs=F.softmax(p, dim=1)).sample()\n elif isinstance(self.action_space, spaces.Box):\n p = torch.chunk(p, 2, dim=1)\n out = distributions.Normal(loc=p[0], scale=1 + p[1] ** 2).sample()\n return out.cpu().numpy()\n\n\n def training_step(self, x, batch_idx):\n loss = self(x.observations)[0].entropy().mean()\n self.log('loss', loss)\n return loss \n\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return 
optimizer\n\n\n\n\[email protected](\"env_id\", [\"CartPole-v1\", \"MountainCar-v0\", \"MountainCarContinuous-v0\"])\ndef test_on_policy_model(env_id):\n \"\"\"\n Check that environmnent integrated in Gym pass the test.\n\n :param env_id: (str)\n \"\"\"\n model = DummyModel(\n env_id,\n eval_env=env_id,\n buffer_length=512,\n num_rollouts=1,\n batch_size=32,\n epochs_per_rollout=10,\n num_eval_episodes=10,\n gamma=0.9,\n gae_lambda=0.95,\n use_sde=False,\n sde_sample_freq=-1,\n verbose=1,\n seed=1234)\n\n trainer = pl.Trainer(max_epochs=2, terminate_on_nan=True)\n trainer.fit(model)\n" ]
[ [ "torch.ones_like", "torch.ones", "torch.distributions.Normal", "torch.chunk", "torch.nn.functional.softmax", "torch.max" ] ]
golunovas/onnx-tensorflow
[ "b6340b3e66aa08af1ea4382e98257c2098177371" ]
[ "test/backend/test_node.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\nimport unittest\nimport numpy as np\nimport tensorflow as tf\nfrom onnx_tf.backend import run_node\nfrom onnx_tf.common import supports_device\nfrom onnx_tf.common.legacy import legacy_onnx_pre_ver, legacy_opset_pre_ver\nfrom onnx import helper\nfrom onnx import TensorProto\nfrom onnx import defs\n\n\nclass TestNode(unittest.TestCase):\n \"\"\" Tests for nodes\n \"\"\"\n\n def _get_rnd_float32(self, low=-1.0, high=1.0, shape=None):\n output = np.random.uniform(low, high, shape)\n if shape == None:\n return np.float32(output)\n else:\n return output.astype(np.float32)\n\n def _get_rnd_int(self, low, high=None, shape=None, dtype=np.int32):\n return np.random.randint(low, high, size=shape, dtype=dtype)\n\n def _elu(self, x):\n # f(x) = alpha * (exp(x) - 1.) for x < 0,\n # f(x) = x for x >= 0\n if x < 0.:\n return np.expm1(x)\n return x\n\n def _leaky_relu(self, x, alpha):\n # f(x) = alpha * x for x < 0,\n # f(x) = x for x >= 0\n if x < 0.:\n return alpha * x\n return x\n\n def test_abs(self):\n node_def = helper.make_node(\"Abs\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.abs(x))\n\n def test_acosh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Acosh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Acosh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.arccosh(x))\n\n def test_add(self):\n node_def = helper.make_node(\"Add\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 10, 5, 5])\n y = self._get_rnd_float32(shape=[10, 1, 1])\n output = run_node(node_def, [x, y])\n 
np.testing.assert_almost_equal(output[\"Z\"],\n np.add(x, y.reshape([1, 10, 1, 1])))\n\n # node_def = helper.make_node(\"Add\", [\"A\", \"B\"], [\"C\"], broadcast=1)\n # a = self._get_rnd([10, 10])\n # b = self._get_rnd([10, 10])\n # output = run_node(node_def, [a, b])\n # np.testing.assert_almost_equal(output[\"C\"], np.add(a, b))\n\n # node_def = helper.make_node(\"Add\", [\"A\", \"B\"], [\"C\"], broadcast=1)\n # a = self._get_rnd([10, 10])\n # b = self._get_rnd([10,])\n # output = run_node(node_def, [a, b])\n # np.testing.assert_almost_equal(output[\"C\"], np.add(a, b))\n\n def test_arg_max(self):\n # TODO: need to fix this test\n return\n for axis in [0, 1]:\n node_def = helper.make_node(\n \"ArgMax\", [\"data\"], [\"reduced\"], axis=axis, keepdims=0)\n data = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [data])\n np.testing.assert_almost_equal(output[\"reduced\"],\n np.argmax(data, axis=axis))\n\n def test_arg_min(self):\n # TODO: need to fix this test\n return\n for axis in [0, 1]:\n node_def = helper.make_node(\n \"ArgMin\", [\"data\"], [\"reduced\"], axis=axis, keepdims=0)\n data = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [data])\n np.testing.assert_almost_equal(output[\"reduced\"],\n np.argmin(data, axis=axis))\n\n def test_asinh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Asinh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Asinh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.arcsinh(x))\n\n def test_atanh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Atanh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Atanh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n 
np.testing.assert_almost_equal(output[\"Y\"], np.arctanh(x))\n\n def test_average_pool(self):\n # TODO: fix this test\n return\n device = \"CUDA\"\n if not supports_device(device):\n raise unittest.SkipTest(\n \"Backend doesn't support device {}\".format(device))\n shape = [1, 1, 40, 40]\n node_def = helper.make_node(\n \"AveragePool\", [\"X\"], [\"Y\"],\n kernel_shape=[1, 2],\n pads=[1, 1],\n strides=[1, 1])\n x = self._get_rnd_float32(shape=shape)\n output = run_node(node_def, [x], device=device)\n test_output = np.zeros(shape)\n for i1 in range(0, shape[0]):\n for i2 in range(0, shape[1]):\n for j1 in range(0, shape[2]):\n for j2 in range(0, shape[3]):\n test_output[i1][i2][j1][j2] = 0\n count = 0\n for k in range(j2, min(j2 + 2, shape[3])):\n test_output[i1][i2][j1][j2] += x[i1][i2][j1][k]\n count += 1\n test_output[i1][i2][j1][j2] /= count\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def _batch_normalization(self, x, mean, variance, bias, scale,\n variance_epsilon):\n inv = np.reciprocal(np.sqrt(variance + variance_epsilon))\n if scale is not None:\n inv *= scale\n return x * inv + (bias - mean * inv if bias is not None else -mean * inv)\n\n def test_batch_normalization(self):\n if legacy_opset_pre_ver(6):\n raise unittest.SkipTest(\"Backend doesn't support consumed flag\")\n node_def = helper.make_node(\n \"BatchNormalization\", [\"X\", \"scale\", \"bias\", \"mean\", \"var\"], [\"Y\"],\n epsilon=0.001)\n x_shape = [3, 5, 4, 2]\n param_shape = [5]\n _param_shape = [1, 5, 1, 1]\n x = self._get_rnd_float32(0, 1, shape=x_shape)\n m = self._get_rnd_float32(0, 1, shape=param_shape)\n _m = m.reshape(_param_shape)\n v = self._get_rnd_float32(0, 1, shape=param_shape)\n _v = v.reshape(_param_shape)\n scale = self._get_rnd_float32(0, 1, shape=param_shape)\n _scale = scale.reshape(_param_shape)\n bias = self._get_rnd_float32(0, 1, shape=param_shape)\n _bias = bias.reshape(_param_shape)\n golden = self._batch_normalization(x, _m, _v, _bias, _scale, 
0.001)\n output = run_node(node_def, [x, scale, bias, m, v])\n np.testing.assert_almost_equal(output[\"Y\"], golden, decimal=5)\n\n def test_cast(self):\n if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):\n test_cases = [(\"FLOAT\", tf.float32), (\"UINT8\", tf.uint8),\n (\"INT8\", tf.int8), (\"UINT16\", tf.uint16), (\"INT16\",\n tf.int16),\n (\"INT32\", tf.int32), (\"INT64\", tf.int64), (\"BOOL\", tf.bool),\n (\"FLOAT16\", tf.float16), (\"DOUBLE\", tf.float64),\n (\"COMPLEX64\", tf.complex64), (\"COMPLEX128\", tf.complex128)]\n else:\n test_cases = [(TensorProto.FLOAT,\n tf.float32), (TensorProto.UINT8,\n tf.uint8), (TensorProto.INT8, tf.int8),\n (TensorProto.UINT16,\n tf.uint16), (TensorProto.INT16,\n tf.int16), (TensorProto.INT32, tf.int32),\n (TensorProto.INT64,\n tf.int64), (TensorProto.BOOL,\n tf.bool), (TensorProto.FLOAT16, tf.float16),\n (TensorProto.DOUBLE,\n tf.float64), (TensorProto.COMPLEX64,\n tf.complex64), (TensorProto.COMPLEX128,\n tf.complex128)]\n if not legacy_opset_pre_ver(9):\n test_cases.append((TensorProto.STRING, tf.string))\n for ty, tf_type in test_cases:\n node_def = helper.make_node(\"Cast\", [\"input\"], [\"output\"], to=ty)\n vector = [2, 3]\n output = run_node(node_def, [vector])\n np.testing.assert_equal(output[\"output\"].dtype, tf_type)\n\n if not legacy_opset_pre_ver(9):\n test_cases2 = [(TensorProto.FLOAT, tf.float32), (TensorProto.INT32,\n tf.int32),\n (TensorProto.INT64, tf.int64), (TensorProto.DOUBLE,\n tf.float64)]\n for ty, tf_type in test_cases2:\n node_def = helper.make_node(\"Cast\", [\"input\"], [\"output\"], to=ty)\n vector = ['2', '3']\n output = run_node(node_def, [vector])\n np.testing.assert_equal(output[\"output\"].dtype, tf_type)\n\n def test_ceil(self):\n node_def = helper.make_node(\"Ceil\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.ceil(x))\n\n def test_compress(self):\n if legacy_opset_pre_ver(9):\n 
raise unittest.SkipTest(\n \"ONNX version {} doesn't support Compress.\".format(\n defs.onnx_opset_version()))\n axis = 1\n node_def = helper.make_node(\n \"Compress\", inputs=['X', 'condition'], outputs=['Y'], axis=axis)\n x = self._get_rnd_float32(shape=[5, 5, 5])\n cond = np.array([1, 0, 1])\n output = run_node(node_def, inputs=[x, cond])\n np.testing.assert_almost_equal(output['Y'], np.compress(cond, x, axis=axis))\n\n def test_concat(self):\n shape = [10, 20, 5]\n for axis in range(len(shape)):\n node_def = helper.make_node(\"Concat\", [\"X1\", \"X2\"], [\"Y\"], axis=axis)\n x1 = self._get_rnd_float32(shape=shape)\n x2 = self._get_rnd_float32(shape=shape)\n output = run_node(node_def, [x1, x2])\n np.testing.assert_almost_equal(output[\"Y\"], np.concatenate((x1, x2),\n axis))\n\n def test_constant(self):\n shape = [16, 16]\n values = np.random.randn(*shape).flatten().astype(float)\n const2_onnx = helper.make_tensor(\"const2\", TensorProto.DOUBLE, shape,\n values)\n node_def = helper.make_node(\"Constant\", [], [\"Y\"], value=const2_onnx)\n output = run_node(node_def, [])\n np.testing.assert_equal(output[\"Y\"].shape, shape)\n np.testing.assert_almost_equal(output[\"Y\"].flatten(), values)\n\n # test sparse tensor\n if not legacy_opset_pre_ver(11):\n expected = np.array([[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])\n x = np.array([[0, 0], [1, 2]]).flatten().astype(np.int64)\n values = helper.make_tensor(\"values\", TensorProto.INT32, [2], [1, 2])\n indices = helper.make_tensor(\"indices\", TensorProto.INT64, [2, 2], x)\n a = helper.make_sparse_tensor(values, indices,[3, 4])\n node_def = helper.make_node(\"Constant\", [], [\"Y\"], sparse_value=a)\n output = run_node(node_def, [])\n b = tf.sparse_to_dense(output[\"Y\"].indices, output[\"Y\"].dense_shape, output[\"Y\"].values)\n result = b.eval(session=tf.Session())\n np.testing.assert_equal(result, expected)\n\n def test_constant_fill(self):\n if not legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\n \"ONNX 
version {} doesn't support ConstantFill.\".format(\n defs.onnx_opset_version()))\n shape = [1, 2, 3, 4]\n extra_shape = [5, 6]\n value = 3.\n node_def = helper.make_node(\n \"ConstantFill\",\n [\"X\"],\n [\"Y\"],\n value=value,\n extra_shape=extra_shape,\n dtype=1,\n )\n x = self._get_rnd_float32(shape=shape)\n y = np.zeros(shape + extra_shape)\n y.fill(value)\n output = run_node(node_def, [x])\n np.testing.assert_equal(output[\"Y\"].dtype, tf.float32)\n np.testing.assert_equal(output[\"Y\"], y)\n\n def test_constant_of_shape(self):\n if defs.onnx_opset_version() < 9:\n raise unittest.SkipTest(\n \"ONNX version {} doesn't support ConstantOfShape.\".format(\n defs.onnx_opset_version()))\n v = helper.make_tensor(\"value\", TensorProto.FLOAT, [1], [1])\n node_def = helper.make_node(\"ConstantOfShape\", [\"X\"], [\"Y\"], value=v)\n x = np.array([4, 3, 2])\n output = run_node(node_def, inputs=[x])\n np.testing.assert_almost_equal(output[\"Y\"], np.ones(x, dtype=np.float32))\n v = helper.make_tensor(\"value\", TensorProto.INT32, [1], [0])\n node_def = helper.make_node(\"ConstantOfShape\", [\"X\"], [\"Y\"], value=v)\n x = np.array([10, 6])\n output = run_node(node_def, inputs=[x])\n np.testing.assert_almost_equal(output[\"Y\"], np.zeros(x, dtype=np.int32))\n\n def test_conv(self):\n device = \"CUDA\"\n if not supports_device(device):\n raise unittest.SkipTest(\n \"Backend doesn't support device {}\".format(device))\n\n N, C, H, W = 4, 3, 5, 5\n x_shape = [N, C, H, W]\n K, kH, kW = 6, 3, 3\n weight_shape = [K, C, kH, kW]\n node_def = helper.make_node(\n \"Conv\", [\"X\", \"weights\"], [\"Y\"],\n pads=[1, 1, 1, 1],\n kernel_shape=[kH, kW])\n\n x = self._get_rnd_float32(shape=x_shape)\n weights = self._get_rnd_float32(shape=weight_shape)\n output = run_node(node_def, [x, weights], device=device)\n\n out_shape = [N, K, H, W]\n test_output = np.zeros(out_shape)\n for n in range(N):\n for c in range(C):\n for h in range(H):\n for w in range(W):\n for k in range(K):\n for kh in 
range(kH):\n for kw in range(kW):\n h_in_range = (h - kH // 2 + kh) < H and (\n h - kH // 2 + kh) >= 0\n w_in_range = (w - kW // 2 + kw) < W and (\n w - kW // 2 + kw) >= 0\n if h_in_range and w_in_range:\n test_output[n][k][h][w] += (x[n][c][h - kH // 2 + kh][\n w - kW // 2 + kw] * weights[k][c][kh][kw])\n\n np.testing.assert_almost_equal(output[\"Y\"], test_output, decimal=5)\n\n def test_conv_transpose(self):\n # Fix test in the future.\n return\n device = \"CUDA\"\n if not supports_device(device):\n raise unittest.SkipTest(\n \"Backend doesn't support device {}\".format(device))\n node_def = helper.make_node(\n \"ConvTranspose\", [\"X\", \"weights\"], [\"Y\"], pads=[1, 1])\n x_shape = [1, 5, 4]\n x = self._get_rnd(x_shape)\n weight_shape = [5, 3, 2]\n weights = self._get_rnd_float32(shape=weight_shape)\n output = run_node(node_def, [x, weights], device=device)\n out_shape = [x_shape[0], weight_shape[1], x_shape[2]]\n test_output = np.zeros(out_shape)\n for b in range(0, x_shape[0]):\n for m in range(0, weight_shape[1]):\n for h in range(0, x_shape[2]):\n v = 0\n for c in range(0, x_shape[1]):\n for k in range(h, min(h + weight_shape[2], x_shape[2])):\n v += x[b][c][k] * weights[c][m][k - h]\n test_output[b][m][h] = v\n np.testing.assert_almost_equal(output[\"Y\"], test_output, decimal=5)\n\n def test_cosh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Cosh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Cosh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.cosh(x))\n\n def test_depth_to_space(self):\n node_def = helper.make_node(\"DepthToSpace\", [\"X\"], [\"Y\"], blocksize=2)\n x_shape = [1, 12, 1, 1]\n x = self._get_rnd_float32(shape=x_shape)\n output = run_node(node_def, [x])\n x = np.transpose(x, (0, 2, 3, 1))\n y = np.reshape(np.swapaxes(x.reshape(1, 1, 1, 2, 2, 3), 2, 3), (1, 2, 2, 
3))\n y = np.transpose(y, (0, 3, 1, 2))\n np.testing.assert_almost_equal(output[\"Y\"], y, decimal=5)\n\n def test_dequantize_linear(self):\n node_def = helper.make_node(\"DequantizeLinear\",\n [\"x\", \"x_scale\", \"x_zero_point\"], [\"y\"])\n for x, x_zero_point in [\n [\n self._get_rnd_int(-128, 127, [2, 6], np.int8),\n self._get_rnd_int(-128, 127, dtype=np.int8)\n ],\n [\n self._get_rnd_int(0, 255, [2, 6], np.uint8),\n self._get_rnd_int(0, 255, dtype=np.uint8)\n ],\n [\n self._get_rnd_int(-512, 512, [2, 6]),\n np.int32(0)\n ]\n ]:\n x_scale = self._get_rnd_float32(-10., 10)\n y = np.subtract(np.float32(x), np.float32(x_zero_point))\n y = np.multiply(y, x_scale)\n output = run_node(node_def, [x, x_scale, x_zero_point])\n np.testing.assert_almost_equal(output[\"y\"], y)\n\n def test_div(self):\n node_def = helper.make_node(\"Div\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[10, 10])\n y = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.divide(x, y))\n\n def test_dropout(self):\n # Since current ONNX only support inference and\n # dropout at inference mode is a no-op,\n # therefore dropout is always a no-op operator\n # in ONNX.\n node_def = helper.make_node(\"Dropout\", [\"X\"], [\"Y\"])\n if legacy_opset_pre_ver(7):\n # at inference mode, is_test is always set to 1\n node_def = helper.make_node(\"Dropout\", [\"X\"], [\"Y\"], is_test=1)\n x = self._get_rnd_float32(shape=[3, 4, 5])\n y = x\n output = run_node(node_def, [x])\n np.testing.assert_equal(output[\"Y\"], y)\n\n def test_dot(self):\n # this op is removed\n # remove this test in the future\n return\n node_def = helper.make_node(\"Dot\", [\"X\", \"Y\"], [\"Z\"])\n x = np.floor(self._get_rnd_float32(shape=[10, 10]))\n y = np.floor(self._get_rnd_float32(shape=[10, 10]))\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.dot(x, y))\n\n def test_elu(self):\n node_def = 
helper.make_node(\"Elu\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n output = run_node(node_def, [x])\n test_output = [self._elu(a) for a in x]\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_equal(self):\n node_def = helper.make_node(\"Equal\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 3, 3, 2])\n y = self._get_rnd_float32(shape=[3, 3, 1])\n output = run_node(node_def, [x, y])\n np.testing.assert_equal(output[\"Z\"], np.equal(x, np.reshape(\n y, [1, 3, 3, 1])))\n\n def test_erf(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Erf.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Erf\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n exp_output = np.vectorize(math.erf)(x).astype(np.float32)\n np.testing.assert_almost_equal(output[\"Y\"], exp_output)\n\n def test_exp(self):\n node_def = helper.make_node(\"Exp\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n x = x - 3.6\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.exp(x))\n\n def test_eye_like(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support EyeLike.\".format(\n defs.onnx_opset_version()))\n for shape in [[6, 10], [10, 6]]:\n for off_diagonal_offset in [-10, -6, -3, 0, 3, 6, 7, 10]:\n node_def = helper.make_node(\n \"EyeLike\", ['x'], ['y'], dtype=1, k=off_diagonal_offset)\n x = self._get_rnd_int(0, 100, shape=shape)\n y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)\n output = run_node(node_def, [x])\n np.testing.assert_equal(output['y'], y)\n\n def test_flatten(self):\n # If input tensor has shape (d_0, d_1, ... d_n) then the\n # output will have shape:\n #\n # (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... 
X dn)\n #\n # TODO: pass axis attribute which is supported in newer\n # versions of onnx\n node_def = helper.make_node(\"Flatten\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 2, 3, 4, 5])\n output = run_node(node_def, [x])\n # TODO: pass axis=3 and uncomment the line below\n # np.testing.assert_almost_equal(output[\"Y\"], x.reshape([60, 20]))\n np.testing.assert_almost_equal(output[\"Y\"], x.reshape([10, 120]))\n\n def test_gather(self):\n node_def = helper.make_node(\"Gather\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[10, 10])\n y = [[0, 1], [1, 2]]\n output = run_node(node_def, [x, y])\n test_output = np.zeros((2, 2, 10))\n for i in range(0, 2):\n for j in range(0, 10):\n test_output[0][i][j] = x[i][j]\n for i in range(0, 2):\n for j in range(0, 10):\n test_output[1][i][j] = x[i + 1][j]\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_gemm(self):\n # Compute Y = alpha * A * B + beta * C\n node_def = helper.make_node(\n \"Gemm\", [\"A\", \"B\", \"C\"], [\"Y\"], transA=0, transB=0, alpha=1.0, beta=1.0)\n x = np.floor(self._get_rnd_float32(shape=[10, 10]))\n y = np.floor(self._get_rnd_float32(shape=[10, 10]))\n z = np.floor(self._get_rnd_float32(shape=[10, 10]))\n output = run_node(node_def, [x, y, z])\n test_output = np.matmul(x, y) + z\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_global_average_pool(self):\n # Image case: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n #\n # Non-image case: (N x C x D1 x D2 ... 
Dn)\n #\n # Output data tensor from pooling across the input tensor.\n # Dimensions will be N x C x 1 x 1\n node_def = helper.make_node(\"GlobalAveragePool\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 10, 2, 3])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 1, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n sum = 0\n for j1 in range(0, 2):\n for j2 in range(0, 3):\n sum += x[i1][i2][j1][j2]\n test_output[i1][i2][0][0] = sum / 6.\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_image_sacler(self):\n # Input: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n # Scale: (flout, default 1.0) the scale to apply\n # Bias: applied to each channel, same size as C\n # Output has same shape and type as input\n x = self._get_rnd_float32(shape=[1, 3, 224, 224])\n #random distribution over [0,1), so add 0.1\n scale = np.random.rand(1)[0] + 0.1\n bias = np.random.rand(3)\n node_def = helper.make_node(\n \"ImageScaler\", [\"X\"], [\"Y\"], scale=scale, bias=bias)\n output = run_node(node_def, [x])\n test_out = np.multiply(x, scale)\n test_out = np.transpose(test_out, [0, 2, 3, 1])\n test_out = np.add(test_out, bias)\n test_out = np.transpose(test_out, [0, 3, 1, 2])\n np.testing.assert_almost_equal(output[\"Y\"], test_out)\n\n def test_is_inf(self):\n if legacy_opset_pre_ver(10):\n raise unittest.SkipTest(\"ONNX version {} doesn't support IsInf.\".format(\n defs.onnx_opset_version()))\n input = np.array(\n [-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\n expected_output = {\n \"node_def\": np.isinf(input),\n \"node_def_neg_false\": np.isposinf(input),\n \"node_def_pos_false\": np.isneginf(input)\n }\n node_defs = {\n \"node_def\":\n helper.make_node(\"IsInf\", [\"X\"], [\"Y\"]),\n \"node_def_neg_false\":\n helper.make_node(\"IsInf\", [\"X\"], [\"Y\"], detect_negative=0),\n \"node_def_pos_false\":\n 
helper.make_node(\"IsInf\", [\"X\"], [\"Y\"], detect_positive=0)\n }\n for key in node_defs:\n output = run_node(node_defs[key], [input])\n np.testing.assert_equal(output[\"Y\"], expected_output[key])\n\n def test_isnan(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support IsNaN.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"IsNaN\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 3])\n x[0][1] = x[1][0] = x[2][2] = np.nan\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.isnan(x))\n\n def test_global_lp_pool(self):\n # Image case: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n #\n # Non-image case: (N x C x D1 x D2 ... Dn)\n #\n # Output data tensor from pooling across the input tensor.\n # Dimensions will be N x C x 1 x 1\n node_def = helper.make_node(\"GlobalLpPool\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 10, 2, 3])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 1, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n tmp = np.zeros([2, 3])\n for j1 in range(0, 2):\n for j2 in range(0, 3):\n tmp[j1][j2] = x[i1][i2][j1][j2]\n test_output[i1][i2][0][0] = np.linalg.norm(tmp)\n np.testing.assert_almost_equal(output[\"Y\"], test_output, decimal=5)\n\n def test_global_max_pool(self):\n # Image case: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n #\n # Non-image case: (N x C x D1 x D2 ... 
Dn)\n #\n # Output data tensor from pooling across the input tensor.\n # Dimensions will be N x C x 1 x 1\n node_def = helper.make_node(\"GlobalMaxPool\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 10, 2, 3])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 1, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n max = x[i1][i2][0][0]\n for j1 in range(0, 2):\n for j2 in range(0, 3):\n if max < x[i1][i2][j1][j2]:\n max = x[i1][i2][j1][j2]\n test_output[i1][i2][0][0] = max\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_less(self):\n node_def = helper.make_node(\"Less\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 3, 3, 2])\n y = self._get_rnd_float32(shape=[3, 3, 1])\n output = run_node(node_def, [x, y])\n np.testing.assert_equal(output[\"Z\"], np.less(x, np.reshape(y,\n [1, 3, 3, 1])))\n\n def test_lp_normalization(self):\n for ordr in range(1, 3):\n node_def = helper.make_node(\"LpNormalization\", [\"X\"], [\"Y\"], p=ordr)\n x = self._get_rnd([2, 2, 3, 2])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"],\n x / np.expand_dims(np.linalg.norm(x, axis=-1, ord=ordr), -1),\n rtol=1e-3)\n\n def test_l_r_n(self):\n # Each input value is divided by:\n #\n # (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta\n alpha = 2.0\n beta = 1.0\n bias = 5.0\n size = 3\n node_def = helper.make_node(\n \"LRN\", [\"X\"], [\"Y\"], alpha=alpha, beta=beta, bias=bias, size=size)\n x = self._get_rnd_float32(shape=[10, 2, 10, 10])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 10, 2])\n x = np.transpose(x, axes=[0, 2, 3, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n for j1 in range(0, 10):\n for j2 in range(0, 2):\n sqr_sum = 0.\n # size of 3 means radius 1 in TF speak\n # i.e. 
the immediate neighbouring values\n # if \"previous\" neighbour exists\n if j2 > 0:\n sqr_sum += x[i1][i2][j1][j2 - 1] * x[i1][i2][j1][j2 - 1]\n # current value\n sqr_sum += x[i1][i2][j1][j2] * x[i1][i2][j1][j2]\n # if \"next\" neighbour exists\n if j2 < 2 - 1:\n sqr_sum += x[i1][i2][j1][j2 + 1] * x[i1][i2][j1][j2 + 1]\n test_output[i1][i2][j1][j2] = \\\n x[i1][i2][j1][j2] / ((bias + (alpha * 1. / size) * sqr_sum) ** beta)\n test_output = np.transpose(test_output, axes=[0, 3, 1, 2])\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_floor(self):\n node_def = helper.make_node(\"Floor\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.floor(x))\n\n def test_leakyrelu(self):\n node_def = helper.make_node(\"LeakyRelu\", [\"X\"], [\"Y\"], alpha=0.8)\n x = np.floor(self._get_rnd_float32(shape=[100]))\n output = run_node(node_def, [x])\n test_output = [self._leaky_relu(a, 0.8) for a in x]\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_log(self):\n node_def = helper.make_node(\"Log\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n x = x + 3.6\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.log(x))\n\n def test_max(self):\n node_def = helper.make_node(\"Max\", [\"X1\", \"X2\", \"X3\", \"X4\"], [\"Z\"])\n x1 = self._get_rnd_float32(shape=[10, 10])\n x2 = self._get_rnd_float32(shape=[10, 10])\n x3 = self._get_rnd_float32(shape=[10, 10])\n x4 = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x1, x2, x3, x4])\n test_output = np.maximum(np.maximum(np.maximum(x1, x2), x3), x4)\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_max_pool(self):\n return\n node_def = helper.make_node(\n \"MaxPool\", [\"X\"], [\"Y\"],\n dilations=[1, 1],\n kernel_shape=[1, 2],\n pads=[0, 0],\n strides=[1, 2])\n x = self._get_rnd_float32(shape=[10, 10, 4, 4])\n 
output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 4, 2])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n for j1 in range(0, 4):\n for j2 in range(0, 2):\n test_output[i1][i2][j1][j2] = \\\n max(x[i1][i2][j1][2*j2], x[i1][i2][j1][2*j2 + 1])\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_mean_variance_normalization(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\n \"ONNX version {} doesn't have test for MeanVarianceNormalization\"\n .format(defs.onnx_opset_version()))\n\n input_data = self._get_rnd_float32(shape=[2,2,2,2])\n # Calculate expected output data using formula:\n # (Input - Mean)/SD\n mean = np.mean(input_data, keepdims=1, axis=(0,2,3))\n std = np.std(input_data, keepdims=1, axis=(0,2,3))\n expected_output = (input_data - mean) / std\n # Testing without \"axes\" argument should default to axes=[0,2,3]\n node_def = helper.make_node(\"MeanVarianceNormalization\", [\"X\"], [\"Y\"])\n output = run_node(node_def, [input_data])\n np.testing.assert_almost_equal(output[\"Y\"], expected_output, decimal=5)\n\n def test_min(self):\n node_def = helper.make_node(\"Min\", [\"X1\", \"X2\", \"X3\", \"X4\"], [\"Z\"])\n x1 = self._get_rnd_float32(shape=[10, 10])\n x2 = self._get_rnd_float32(shape=[10, 10])\n x3 = self._get_rnd_float32(shape=[10, 10])\n x4 = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x1, x2, x3, x4])\n test_output = np.minimum(np.minimum(np.minimum(x1, x2), x3), x4)\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_mul(self):\n node_def = helper.make_node(\"Mul\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 10, 5, 5])\n y = self._get_rnd_float32(shape=[10, 1, 1])\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"],\n np.multiply(x, y.reshape([1, 10, 1, 1])))\n\n def test_mod(self):\n if legacy_opset_pre_ver(10):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Mod.\".format(\n 
defs.onnx_opset_version()))\n x = self._get_rnd_float32(shape=[5, 5])\n y = self._get_rnd_float32(shape=[5, 5])\n node_def = helper.make_node(\"Mod\", [\"X\", \"Y\"], [\"Z\"], fmod=0)\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.mod(x, y))\n node_def = helper.make_node(\"Mod\", [\"X\", \"Y\"], [\"Z\"], fmod=1)\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.fmod(x, y))\n\n def test_neg(self):\n node_def = helper.make_node(\"Neg\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.negative(x))\n\n def test_non_zero(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support NonZero.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"NonZero\", [\"x\"], [\"y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n y = np.array(np.nonzero(x))\n output = run_node(node_def, [x])\n np.testing.assert_equal(output[\"y\"], y)\n\n def test_onehot(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support OneHot.\".format(\n defs.onnx_opset_version()))\n indices = np.array([[0, 2], [1, 2], [0, 1]])\n depth = np.int32(5)\n on_value = 6.0\n off_value = 2.0\n values = np.array([off_value, on_value])\n node_def = helper.make_node(\n 'OneHot', inputs=['indices', 'depth', 'values'], outputs=['y'], axis=-1)\n y = (np.arange(depth) == indices[..., None]).astype(int)\n y = y * (on_value - off_value) + off_value\n output = run_node(node_def, inputs=[indices, depth, values])\n np.testing.assert_equal(output['y'], y)\n\n def test_range(self):\n if legacy_opset_pre_ver(11):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Range.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\n \"Range\", ['start', 'limit', 'delta'], ['y'])\n # test positive_delta\n start = self._get_rnd_int(low=0, 
high=3)\n limit = self._get_rnd_int(low=10, high=30)\n delta = np.int32(3)\n output = run_node(node_def, [start, limit, delta])\n np.testing.assert_equal(output['y'], range(start, limit, delta))\n # test negative_delta\n start = self._get_rnd_int(low=20, high=30)\n limit = self._get_rnd_int(low=1, high=5)\n delta = np.int32(-2)\n output = run_node(node_def, [start, limit, delta])\n np.testing.assert_equal(output['y'], range(start, limit, delta))\n\n def test_round(self):\n if legacy_opset_pre_ver(11):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Round.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Round\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(-20.0, 20.0, shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.round(x))\n\n def test_relu(self):\n node_def = helper.make_node(\"Relu\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.maximum(x, 0))\n\n def test_pad(self):\n node_def = helper.make_node(\n \"Pad\", [\"X\"], [\"Y\"], mode=\"constant\", pads=[1, 1, 1, 1], value=2.0)\n x = self._get_rnd_float32(shape=[100, 100])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"],\n np.lib.pad(\n x, ((1, 1), (1, 1)),\n 'constant',\n constant_values=(2, 2)))\n\n def test_quantize_linear(self):\n node_def = helper.make_node(\"QuantizeLinear\",\n [\"x\", \"y_scale\", \"y_zero_point\"], [\"y\"])\n for x in [\n self._get_rnd_float32(-512., 512., [2, 6]),\n self._get_rnd_int(-512, 512, [2, 6])\n ]:\n y_scale = self._get_rnd_float32(-10., 10.)\n for y_zero_point in [\n self._get_rnd_int(-128, 127, dtype=np.int8),\n self._get_rnd_int(0, 255, dtype=np.uint8)\n ]:\n y = np.divide(x, y_scale)\n y = np.round(y)\n y = np.add(y, y_zero_point)\n if y_zero_point.dtype.type is np.int8:\n y = np.clip(y, -128, 127).astype(np.int8)\n else:\n y = np.clip(y, 0, 
255).astype(np.uint8)\n output = run_node(node_def, [x, y_scale, y_zero_point])\n np.testing.assert_almost_equal(output[\"y\"], y)\n\n def test_reciprocal(self):\n node_def = helper.make_node(\"Reciprocal\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], 1.0 / x)\n\n def test_reduce_l1(self):\n node_def = helper.make_node(\"ReduceL1\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"],\n np.linalg.norm(x, 1, (1, 2), True))\n\n def test_reduce_log_sum_exp(self):\n node_def = helper.make_node(\"ReduceLogSumExp\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"],\n np.log(np.sum(np.exp(x), axis=(1, 2), keepdims=True)),\n rtol=1e-3)\n\n def test_reduce_max(self):\n node_def = helper.make_node(\"ReduceMax\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.max(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_mean(self):\n node_def = helper.make_node(\"ReduceMean\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.mean(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_min(self):\n node_def = helper.make_node(\"ReduceMin\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.min(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_prod(self):\n node_def = helper.make_node(\"ReduceProd\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[1, 5, 5, 3])\n output = 
run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.prod(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_sum(self):\n node_def = helper.make_node(\"ReduceSum\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.sum(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_sum_square(self):\n node_def = helper.make_node(\"ReduceSumSquare\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.sum(np.square(x), (1, 2), keepdims=True), rtol=1e-3)\n\n def test_pow(self):\n node_def = helper.make_node(\"Pow\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=1000) / 2.0 + 0.5\n y = self._get_rnd_float32(shape=1000) / 2.0 + 0.5\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.power(x, y))\n\n def test_reshape(self):\n x = self._get_rnd_float32(shape=100)\n shape = [10, 10]\n if defs.onnx_opset_version() < 5:\n node_def = helper.make_node(\"Reshape\", [\"X\"], [\"Z\"], shape=shape)\n output = run_node(node_def, [x])\n else:\n node_def = helper.make_node(\"Reshape\", [\"X\", \"Y\"], [\"Z\"])\n output = run_node(node_def, [x, shape])\n\n np.testing.assert_almost_equal(output[\"Z\"], x.reshape([10, 10]))\n\n def test_reshape_with_copy(self):\n x = self._get_rnd_float32(shape=[10, 20 * 30])\n shape = [0, 20, 30]\n if defs.onnx_opset_version() < 5:\n node_def = helper.make_node(\"Reshape\", [\"X\"], [\"Z\"], shape=shape)\n output = run_node(node_def, [x])\n else:\n node_def = helper.make_node(\"Reshape\", [\"X\", \"Y\"], [\"Z\"])\n output = run_node(node_def, [x, shape])\n\n np.testing.assert_almost_equal(output[\"Z\"], x.reshape([10, 20, 30]))\n\n def test_selu(self):\n node_def = helper.make_node(\"Selu\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n 
output = run_node(node_def, [x])\n alpha = 1.6732\n gamma = 1.0507\n x[x <= 0] = gamma * (alpha * np.exp(x[x <= 0]) - alpha)\n x[x > 0] = gamma * x[x > 0]\n np.testing.assert_allclose(output[\"Y\"], x, rtol=1e-3, atol=1e-7)\n\n def test_shape(self):\n node_def = helper.make_node(\"Shape\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(output[\"Y\"], np.shape(x))\n\n def test_shrink(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Shrink.\".format(\n defs.onnx_opset_version()))\n\n node_def = helper.make_node(\"Shrink\", [\"X\"], [\"Y\"], bias=1.5, lambd=1.5)\n\n X = np.arange(-2.0, 2.1, dtype=np.float32)\n Y = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)\n output = run_node(node_def, [X])\n np.testing.assert_almost_equal(output[\"Y\"], Y)\n\n def test_sigmoid(self):\n node_def = helper.make_node(\"Sigmoid\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], 1 / (1 + np.exp(-x)))\n\n def test_sign(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Sign.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Sign\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(-10, 10, [3, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.sign(x))\n\n def test_sinh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Sinh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Sinh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.sinh(x))\n\n def test_size(self):\n node_def = helper.make_node(\"Size\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = 
run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.size(x))\n\n def test_slice(self):\n # test case 1 with normal inputs\n axes = [0, 1, 2]\n starts = [0, 0, 0]\n ends = [2, 2, 2]\n steps = [1, 1, 1]\n\n if legacy_opset_pre_ver(10):\n node_def = helper.make_node(\n \"Slice\", [\"X\"], [\"S\"], axes=axes, starts=starts, ends=ends)\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"S\"], x[0:2, 0:2, 0:2])\n else:\n node_def = helper.make_node(\n \"Slice\", [\"X\", \"starts\", \"ends\", \"axes\", \"steps\"], [\"S\"])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x, starts, ends, axes, steps])\n np.testing.assert_almost_equal(output[\"S\"], x[0:2, 0:2, 0:2])\n\n # test case 2 with negative, out-of-bound and default inputs\n axes = [0, 2]\n starts = [0, -7]\n ends = [-8, 20]\n\n if legacy_opset_pre_ver(10):\n node_def = helper.make_node(\n \"Slice\", [\"X\"], [\"S\"], axes=axes, starts=starts, ends=ends)\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"S\"], x[0:-8, :, -7:20])\n else:\n node_def = helper.make_node(\n \"Slice\", [\"X\", \"starts\", \"ends\", \"axes\"], [\"S\"])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x, starts, ends, axes])\n np.testing.assert_almost_equal(output[\"S\"], x[0:-8, :, -7:20])\n\n # test case 3 with non-default steps\n axes = [0, 1, 2]\n starts = [0, 0, 0]\n ends = [2, 2, 2]\n steps = [2, -2, -1]\n\n if legacy_opset_pre_ver(10) == False:\n node_def = helper.make_node(\n \"Slice\", [\"X\", \"starts\", \"ends\", \"axes\", \"steps\"], [\"S\"])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x, starts, ends, axes, steps])\n np.testing.assert_almost_equal(output[\"S\"], x[0:2:2, 0:2:-2, 
0:2:-1])\n\n def test_softplus(self):\n node_def = helper.make_node(\"Softplus\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.log(np.exp(x) + 1))\n\n def test_softsign(self):\n node_def = helper.make_node(\"Softsign\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], x / (1 + np.abs(x)))\n\n def test_space_to_depth(self):\n node_def = helper.make_node(\"SpaceToDepth\", [\"X\"], [\"Y\"], blocksize=2)\n x_shape = [1, 3, 2, 2]\n x = self._get_rnd_float32(shape=x_shape)\n output = run_node(node_def, [x])\n x = np.transpose(x, (0, 2, 3, 1))\n y = np.reshape(\n np.swapaxes(x.reshape(1, 1, 1, 1, 1, 12), 2, 3), (1, 1, 1, 12))\n y = np.transpose(y, (0, 3, 1, 2))\n np.testing.assert_allclose(output[\"Y\"], y, rtol=1e-3)\n\n def test_split(self):\n split = [3, 3, 4]\n node_def = helper.make_node(\n \"Split\", [\"X\"], [\"Z%i\" % i for i in range(len(split))],\n axis=0,\n split=split)\n x = self._get_rnd_float32(shape=[100]).reshape([10, 10])\n\n output = run_node(node_def, [x])\n for a, b in zip(list(output), np.split(x, np.cumsum(split))[:-1]):\n np.testing.assert_almost_equal(a, b)\n\n def test_sqrt(self):\n node_def = helper.make_node(\"Sqrt\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000]) + 1.0\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.sqrt(x), decimal=5)\n\n def test_squeeze(self):\n node_def = helper.make_node(\"Squeeze\", [\"X\"], [\"Y\"], axes=[2])\n x = np.array([[[0], [1], [2]]])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.squeeze(x, axis=2))\n\n def test_sub(self):\n node_def = helper.make_node(\"Sub\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[10, 10])\n y = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x, y])\n 
np.testing.assert_almost_equal(output[\"Z\"], np.subtract(x, y))\n\n def test_sum(self):\n node_def = helper.make_node(\"Sum\", [\"X1\", \"X2\", \"X3\", \"X4\"], [\"Z\"])\n x1 = self._get_rnd_float32(shape=[10, 10])\n x2 = self._get_rnd_float32(shape=[10, 10])\n x3 = self._get_rnd_float32(shape=[10, 10])\n x4 = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x1, x2, x3, x4])\n test_output = x1 + x2 + x3 + x4\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_tanh(self):\n node_def = helper.make_node(\"Tanh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000]) + 1.0\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.tanh(x), decimal=5)\n\n def test_thresholded_relu(self):\n alpha = 2.0\n node_def = helper.make_node(\n \"ThresholdedRelu\", [\"X\"], [\"Y\"], alpha=alpha)\n x = self._get_rnd_float32(-3.0, 3.0, [10])\n y = np.clip(x, alpha, np.inf)\n y[y == alpha] = 0\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], y)\n\n def test_tile(self):\n if legacy_onnx_pre_ver(1, 2):\n raise unittest.SkipTest(\n \"The current version of ONNX does not record correctly the opset of Tile.\"\n )\n node_def = helper.make_node(\"Tile\", [\"X1\", \"X2\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[3, 5, 5, 3])\n repeats = [1, 1, 2, 1]\n output = run_node(node_def, [x, repeats])\n np.testing.assert_allclose(output[\"Z\"], np.tile(x, repeats), rtol=1e-3)\n\n def test_transpose(self):\n node_def = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 1])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.transpose(x, (0, 2, 1)))\n\n def test_topk(self):\n x = np.arange(15, dtype=np.float32).reshape(3, 5)\n values = np.array([[4, 3], [9, 8], [14, 13]], dtype=np.float32)\n indices = np.array([[4, 3], [4, 3], [4, 3]], dtype=np.int64)\n if legacy_opset_pre_ver(10): # 
for opset = 1\n node_def = helper.make_node(\"TopK\", [\"x\"], [\"values\", \"indices\"], k=2)\n output = run_node(node_def, [x])\n elif legacy_opset_pre_ver(11): # for opset = 10\n k = np.array([2], dtype=np.int64)\n node_def = helper.make_node(\"TopK\", [\"x\", \"k\"], [\"values\", \"indices\"])\n output = run_node(node_def, [x, k])\n else: # for opset = 11\n x = np.array([[3, 2, 5, 10, 7], [12, 15, 10, 7, 20], [21, 16, 5, 3, 6]],\n dtype=np.float32)\n values = np.array([[3, 2], [10, 7], [5, 3]], dtype=np.float32)\n indices = np.array([[0, 1], [2, 3], [2, 3]], dtype=np.int64)\n k = np.array([2], dtype=np.int64)\n node_def = helper.make_node(\n \"TopK\", [\"x\", \"k\"], [\"values\", \"indices\"], largest=0, sorted=0)\n output = run_node(node_def, [x, k])\n np.testing.assert_almost_equal(output[\"values\"], values)\n np.testing.assert_almost_equal(output[\"indices\"], indices)\n\n def test_where(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Where.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Where\", [\"C\", \"X\", \"Y\"], [\"Z\"])\n c = np.array([[1, 0], [1, 1]], dtype=np.bool)\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[9, 8], [7, 6]], dtype=np.float32)\n output = run_node(node_def, [c, x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.where(c, x, y))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.ones", "numpy.multiply", "numpy.sum", "numpy.vectorize", "numpy.subtract", "numpy.testing.assert_equal", "numpy.arctanh", "numpy.size", "numpy.lib.pad", "numpy.add", "numpy.log", "numpy.tanh", "numpy.testing.assert_almost_equal", "numpy.transpose", "numpy.arccosh", "numpy.arcsinh", "numpy.argmin", "numpy.concatenate", "numpy.reshape", "numpy.abs", "numpy.random.rand", "numpy.isnan", "numpy.negative", "numpy.where", "numpy.expm1", "numpy.nonzero", "numpy.round", "numpy.mean", "numpy.minimum", "numpy.random.uniform", "numpy.sqrt", "numpy.tile", "numpy.eye", "numpy.ceil", "numpy.zeros", "tensorflow.sparse_to_dense", "numpy.isposinf", "numpy.float32", "numpy.sinh", "numpy.argmax", "numpy.mod", "numpy.int32", "numpy.arange", "numpy.max", "numpy.power", "numpy.min", "tensorflow.Session", "numpy.prod", "numpy.maximum", "numpy.std", "numpy.square", "numpy.linalg.norm", "numpy.cosh", "numpy.matmul", "numpy.compress", "numpy.divide", "numpy.sign", "numpy.squeeze", "numpy.cumsum", "numpy.isinf", "numpy.fmod", "numpy.floor", "numpy.random.randn", "numpy.exp", "numpy.clip", "numpy.shape", "numpy.testing.assert_allclose", "numpy.array", "numpy.dot", "numpy.random.randint", "numpy.isneginf" ] ]
Martin36/tapas
[ "2987658c3b65c5ab6e698d6c57823dc30d3d0f96" ]
[ "tapas/models/tapas_classifier_model_utils.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"TAPAS BERT model utils for classification.\"\"\"\n\nfrom typing import Dict, Text, Tuple, Optional\nfrom tapas.models import segmented_tensor\nimport tensorflow.compat.v1 as tf\n\nEPSILON_ZERO_DIVISION = 1e-10\nCLOSE_ENOUGH_TO_LOG_ZERO = -10000.0\n\n\ndef classification_initializer():\n \"\"\"Classification layer initializer.\"\"\"\n return tf.truncated_normal_initializer(stddev=0.02)\n\n\ndef extract_answer_from_features(\n features, use_answer_as_supervision\n):\n \"\"\"Extracts the answer, numeric_values, numeric_values_scale.\"\"\"\n if use_answer_as_supervision:\n answer = tf.squeeze(features[\"answer\"], axis=[1])\n numeric_values = features[\"numeric_values\"]\n numeric_values_scale = features[\"numeric_values_scale\"]\n else:\n answer = None\n numeric_values = None\n numeric_values_scale = None\n return answer, numeric_values, numeric_values_scale\n\n\ndef compute_token_logits(output_layer, temperature,\n init_cell_selection_weights_to_zero):\n \"\"\"Computes logits per token.\n\n Args:\n output_layer: <float>[batch_size, seq_length, hidden_dim] Output of the\n encoder layer.\n temperature: float Temperature for the Bernoulli distribution.\n init_cell_selection_weights_to_zero: Whether the initial weights should be\n set to 0. 
This ensures that all tokens have the same prior probability.\n\n Returns:\n <float>[batch_size, seq_length] Logits per token.\n \"\"\"\n hidden_size = output_layer.shape.as_list()[-1]\n output_weights = tf.get_variable(\n \"output_weights\", [hidden_size],\n initializer=tf.zeros_initializer()\n if init_cell_selection_weights_to_zero else classification_initializer())\n output_bias = tf.get_variable(\n \"output_bias\", shape=(), initializer=tf.zeros_initializer())\n logits = (tf.einsum(\"bsj,j->bs\", output_layer, output_weights) +\n output_bias) / temperature\n return logits\n\n\n# TODO(eisenjulian): Move more methods from tapas_classifier_model\ndef compute_column_logits(output_layer,\n cell_index,\n cell_mask,\n init_cell_selection_weights_to_zero,\n allow_empty_column_selection):\n \"\"\"Computes logits for each column.\n\n Args:\n output_layer: <float>[batch_size, seq_length, hidden_dim] Output of the\n encoder layer.\n cell_index: segmented_tensor.IndexMap [batch_size, seq_length] Index that\n groups tokens into cells.\n cell_mask: <float>[batch_size, max_num_rows * max_num_cols] Input mask per\n cell, 1 for cells that exists in the example and 0 for padding.\n init_cell_selection_weights_to_zero: Whether the initial weights should be\n set to 0. This is also applied to column logits, as they are used to\n select the cells. This ensures that all columns have the same prior\n probability.\n allow_empty_column_selection: Allow to select no column.\n\n Returns:\n <float>[batch_size, max_num_cols] Logits per column. 
Logits will be set to\n a very low value (such that the probability is 0) for the special id 0\n (which means \"outside the table\") or columns that do not apear in the\n table.\n \"\"\"\n hidden_size = output_layer.shape.as_list()[-1]\n column_output_weights = tf.get_variable(\n \"column_output_weights\", [hidden_size],\n initializer=tf.zeros_initializer()\n if init_cell_selection_weights_to_zero else classification_initializer())\n column_output_bias = tf.get_variable(\n \"column_output_bias\", shape=(), initializer=tf.zeros_initializer())\n token_logits = (\n tf.einsum(\"bsj,j->bs\", output_layer, column_output_weights) +\n column_output_bias)\n\n # Average the logits per cell and then per column.\n # Note that by linearity it doesn't matter if we do the averaging on the\n # embeddings or on the logits. For performance we do the projection first.\n # [batch_size, max_num_cols * max_num_rows]\n cell_logits, cell_logits_index = segmented_tensor.reduce_mean(\n token_logits, cell_index)\n\n column_index = cell_index.project_inner(cell_logits_index)\n # [batch_size, max_num_cols]\n column_logits, out_index = segmented_tensor.reduce_sum(\n cell_logits * cell_mask, column_index)\n cell_count, _ = segmented_tensor.reduce_sum(cell_mask, column_index)\n column_logits /= cell_count + EPSILON_ZERO_DIVISION\n\n # Mask columns that do not appear in the example.\n is_padding = tf.logical_and(cell_count < 0.5,\n tf.not_equal(out_index.indices, 0))\n column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32)\n\n if not allow_empty_column_selection:\n column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(\n tf.equal(out_index.indices, 0), tf.float32)\n\n return column_logits\n" ]
[ [ "tensorflow.compat.v1.not_equal", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.truncated_normal_initializer", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.einsum", "tensorflow.compat.v1.equal" ] ]
suhongkim/SSD-Vehicle-Detector
[ "8337d237f4c7923e55d02747ec37a60681e5beff" ]
[ "vehicle_detection.py" ]
[ "import os\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom cityscape_dataset import CityScapeDataset\r\nfrom ssd_util import load_dataset_list, load_dataset_list_original, show_loss, show_log\r\nfrom ssd_net import SSD\r\nfrom ssd_train import train_net\r\nfrom ssd_test import test_net\r\n\r\n\r\nif __name__ == '__main__':\r\n # Define Label Group\r\n dataset_label_group = {\r\n 'background': [],\r\n 'sm_veh': ['motorcycle', 'motorcyclegroup', 'bicycle', 'bicyclegroup'],\r\n 'med_veh': ['car', 'cargroup'],\r\n # 'ego_veh': ['ego vehicle'],\r\n 'big_veh': ['bus', 'trailer', 'truck'],\r\n # 'people': ['person', 'persongroup'],\r\n # 'riders': ['rider', 'ridergroup']\r\n }\r\n\r\n # Define Configurations\r\n config = {'is_gpu': True,\r\n 'debug': False,\r\n 'n_aug': 1,\r\n 'n_batch': 64,\r\n 'n_worker': 4,\r\n 'lr': 0.001,\r\n 'max_epoch': 100,\r\n 'save_epochs': [10,20,30,40,50,60,70,80,90],\r\n 'is_lr_scheduled': False,\r\n # 'class_labels': ['background', 'cargroup'],\r\n # 'class_labels': ['background', 'persongroup', 'person', 'cargroup', 'car'],\r\n 'label_groups': dataset_label_group,\r\n 'class_labels': list(dataset_label_group.keys()),\r\n 'is_train': True,\r\n 'is_test': True,\r\n 'results_path': '/home/suhongk/sfuhome/CMPT742/Lab3/vehicle_detection_v2/results/SSD__28th_16:47_best_model.pth'\r\n }\r\n\r\n # crop original image\r\n # person + persongroup , car+Cargroup\r\n # Overfitted data for the unaug\r\n # check training set\r\n\r\n # Default Cuda Setting -------------------------------------------------\r\n from torch.multiprocessing import Pool, Process, set_start_method\r\n try:\r\n set_start_method('spawn')\r\n except RuntimeError:\r\n pass\r\n\r\n if torch.cuda.is_available():\r\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\r\n torch.backends.cudnn.benchmark = True\r\n\r\n # load dataset_list -------------------------------------------------\r\n if config['is_gpu']:\r\n sample_path = 
'/home/datasets/full_dataset/train_extra/'\r\n label_path = '/home/datasets/full_dataset_labels/train_extra'\r\n else:\r\n sample_path = '../cityscapes_samples/'\r\n label_path = '../cityscapes_samples_labels/'\r\n\r\n dataset_list = load_dataset_list(sample_path, label_path, config['label_groups'])\r\n # dataset_list = load_dataset_list_original(sample_path, label_path, config['class_labels'])\r\n # Define dataset/dataloader -------------------------------------------\r\n num_train = int(0.3 * len(dataset_list))\r\n num_valid = int(0.1 * len(dataset_list))\r\n if config['is_train']:\r\n train_dataset = CityScapeDataset(dataset_list[:num_train], n_augmented=config['n_aug'], debug=config['debug'])\r\n train_loader = DataLoader(train_dataset, batch_size=config['n_batch'], shuffle=True, num_workers=config['n_worker'])\r\n print('Total training items: ', len(train_dataset))\r\n print('Total training batches size in one epoch: ', len(train_loader))\r\n\r\n valid_dataset = CityScapeDataset(dataset_list[num_train:(num_train + num_valid)], debug=config['debug'])\r\n valid_loader = DataLoader(valid_dataset, batch_size=config['n_batch'], shuffle=True, num_workers=config['n_worker'])\r\n print('Total validating items: ', len(valid_dataset))\r\n print('Total validating batches size in one epoch: ', len(valid_loader))\r\n\r\n if config['is_test']:\r\n test_dataset = CityScapeDataset(dataset_list[(num_train + num_valid):], debug=config['debug'])\r\n print('Total testing items: ', len(test_dataset))\r\n\r\n # Train network -----------------------------------------------------\r\n if config['is_train']:\r\n lab_results_dir = \"./results/\" # for the results\r\n results_path = train_net(train_loader, valid_loader, config['class_labels'], lab_results_dir,\r\n learning_rate=config['lr'], is_lr_scheduled=config['is_lr_scheduled'],\r\n max_epoch=config['max_epoch'], save_epochs=config['save_epochs'])\r\n print('\\n\\n-----------------------\\n\\tresult_path:', results_path)\r\n if not 
config['is_gpu']:\r\n show_loss(results_path + '.loss')\r\n # show_log(results_path + '__train.log')\r\n # show_log(results_path + '__valid.log')\r\n if config['is_test']:\r\n test_net(test_dataset, config['class_labels'], (results_path + '__model.pth'))\r\n # Train network -----------------------------------------------------\r\n if config['is_test'] and not config['is_train']:\r\n test_net(test_dataset, config['class_labels'], config['results_path'])\r\n # pass\r\n # Test Code ----------------------------------------------------------\r\n # idx, (imgs, bbox_label, bbox_indices, _) = next(enumerate(train_loader))\r\n # print(bbox_indices)\r\n # test_dataset.__getitem__(9)\r\n # net = SSD(len(class_labels))\r\n # net.cuda()\r\n # net.forward(torch.rand(1, 3, 300, 300))\r\n\r\n\r\n\r\n\r\n" ]
[ [ "torch.set_default_tensor_type", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.multiprocessing.set_start_method" ] ]
DavidLesnjak/CMSIS_5
[ "e0848410d137758a3356a5ee94ca4501cea708a8" ]
[ "CMSIS/DSP/Examples/ARM/arm_bayes_example/train.py" ]
[ "from sklearn.naive_bayes import GaussianNB\nimport random\nimport numpy as np\nimport math\n\nfrom pylab import scatter,figure, clf, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show,semilogx, semilogy\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n\n# Generation of data to train the classifier\n# 100 vectors are generated. Vector have dimension 2 so can be represented as points\nNBVECS = 100\nVECDIM = 2\n\n# 3 cluster of points are generated\nballRadius = 1.0\nx1 = [1.5, 1] + ballRadius * np.random.randn(NBVECS,VECDIM)\nx2 = [-1.5, 1] + ballRadius * np.random.randn(NBVECS,VECDIM)\nx3 = [0, -3] + ballRadius * np.random.randn(NBVECS,VECDIM)\n\n# All points are concatenated\nX_train=np.concatenate((x1,x2,x3))\n\n# The classes are 0,1 and 2.\nY_train=np.concatenate((np.zeros(NBVECS),np.ones(NBVECS),2*np.ones(NBVECS)))\n\ngnb = GaussianNB()\ngnb.fit(X_train, Y_train)\n\nprint(\"Testing\")\ny_pred = gnb.predict([[1.5,1.0]])\nprint(y_pred)\n\ny_pred = gnb.predict([[-1.5,1.0]])\nprint(y_pred)\n\ny_pred = gnb.predict([[0,-3.0]])\nprint(y_pred)\n\n# Dump of data for CMSIS-DSP\n\nprint(\"Parameters\")\n# Gaussian averages\nprint(\"Theta = \",list(np.reshape(gnb.theta_,np.size(gnb.theta_))))\n\n# Gaussian variances\nprint(\"Sigma = \",list(np.reshape(gnb.sigma_,np.size(gnb.sigma_))))\n\n# Class priors\nprint(\"Prior = \",list(np.reshape(gnb.class_prior_,np.size(gnb.class_prior_))))\n\nprint(\"Epsilon = \",gnb.epsilon_)\n\n\n# Some bounds are computed for the graphical representation\nx_min = X_train[:, 0].min()\nx_max = X_train[:, 0].max()\ny_min = X_train[:, 1].min()\ny_max = X_train[:, 1].max()\n\nfont = FontProperties()\nfont.set_size(20)\n\nr=plt.figure()\nplt.axis('off')\nplt.text(1.5,1.0,\"A\", verticalalignment='center', horizontalalignment='center',fontproperties=font)\nplt.text(-1.5,1.0,\"B\",verticalalignment='center', horizontalalignment='center', fontproperties=font)\nplt.text(0,-3,\"C\", 
verticalalignment='center', horizontalalignment='center',fontproperties=font)\nscatter(x1[:,0],x1[:,1],s=1.0,color='#FF6B00')\nscatter(x2[:,0],x2[:,1],s=1.0,color='#95D600')\nscatter(x3[:,0],x3[:,1],s=1.0,color='#00C1DE')\n#r.savefig('fig.jpeg')\n#plt.close(r)\nshow()" ]
[ [ "numpy.ones", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "numpy.random.randn", "numpy.size", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.text", "numpy.concatenate", "sklearn.naive_bayes.GaussianNB" ] ]
allen050883/Project
[ "22a9f1e466e595d8808e59fc58801881f3399df4" ]
[ "kaggle_SIIM-ACR_Pneumothorax_Segmentation/utils/loss.py" ]
[ "import torch\nimport torch.nn.functional as F\n\ndef dice_score(inputs, targets, smooth=1):\n # Flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum() \n dice_score = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth) \n \n return dice_score\n\ndef get_dice_loss(inputs, targets, smooth=1):\n # Flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum() \n dice_loss = 1 - (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth) \n \n return dice_loss\n\ndef get_focal_loss(inputs, targets, alpha=0.8, gamma=2): \n # Flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n # First compute binary cross-entropy \n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n BCE_EXP = torch.exp(-BCE)\n focal_loss = alpha * (1-BCE_EXP)**gamma * BCE\n \n return focal_loss\n\ndef combo_loss(inputs, targets):\n dice_loss = get_dice_loss(inputs, targets)\n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n focal_loss = get_focal_loss(inputs, targets)\n \n return focal_loss - torch.log(1-dice_loss)\n" ]
[ [ "torch.log", "torch.nn.functional.binary_cross_entropy", "torch.exp" ] ]
mrsiegfried/read-ICESat-2
[ "1406b92691d284616ca6c9d72646eca4592d1f1d" ]
[ "scripts/interp_sea_level_ICESat2_ATL07.py" ]
[ "#!/usr/bin/env python\nu\"\"\"\ninterp_sea_level_ICESat2_ATL07.py\nWritten by Tyler Sutterley (05/2021)\nInterpolates sea level anomalies (sla), absolute dynamic topography (adt) and\n mean dynamic topography (mdt) to times and locations of ICESat-2 ATL07 data\n\nhttps://www.aviso.altimetry.fr/en/data/products/sea-surface-height-products/\n global/msla-h.html\nftp://ftp.sltac.cls.fr/Core/SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047/\n dataset-duacs-rep-global-merged-allsat-phy-l4-v3\n\nNote that the AVISO sea level data are gzip compressed netCDF4 files\n\nCOMMAND LINE OPTIONS:\n -D X, --directory X: Working data directory\n -V, --verbose: Output information about each created file\n -M X, --mode X: Permission mode of directories and files created\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n pyproj: Python interface to PROJ library\n https://pypi.org/project/pyproj/\n scikit-learn: Machine Learning in Python\n https://scikit-learn.org/stable/index.html\n https://github.com/scikit-learn/scikit-learn\n h5py: Python interface for Hierarchal Data Format 5 (HDF5)\n https://h5py.org\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n\nPROGRAM DEPENDENCIES:\n read_ICESat2_ATL07.py: reads ICESat-2 sea ice height data files\n time.py: utilities for calculating time operations\n utilities.py: download and management utilities for syncing files\n\nUPDATE HISTORY:\n Updated 05/2021: print full path of output filename\n Written 03/2021\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport re\nimport gzip\nimport h5py\nimport pyproj\nimport netCDF4\nimport argparse\nimport datetime\nimport numpy as np\nimport sklearn.neighbors\nimport icesat2_toolkit.time\nfrom icesat2_toolkit.read_ICESat2_ATL07 import read_HDF5_ATL07\n\n#-- PURPOSE: set the hemisphere of interest based on the granule\ndef 
set_hemisphere(GRANULE):\n if GRANULE in ('10','11','12'):\n projection_flag = 'S'\n elif GRANULE in ('03','04','05'):\n projection_flag = 'N'\n return projection_flag\n\n#-- PURPOSE: interpolates to coordinates with inverse distance weighting\ndef inverse_distance(x, y, z, xi, yi, SEARCH='BallTree', N=10, POWER=2.0):\n #-- number of output points\n npts = len(xi)\n #-- create neighbors object for coordinates\n if (SEARCH == 'BallTree'):\n tree = sklearn.neighbors.BallTree(np.c_[x,y])\n elif (SEARCH == 'KDTree'):\n tree = sklearn.neighbors.KDTree(np.c_[x,y])\n #-- query the search tree to find the N closest points\n dist,indices = tree.query(np.c_[xi,yi], k=N, return_distance=True)\n #-- normalized weights if POWER > 0 (typically between 1 and 3)\n #-- in the inverse distance weighting\n power_inverse_distance = dist**(-POWER)\n s = np.sum(power_inverse_distance, axis=1)\n w = power_inverse_distance/np.broadcast_to(s[:,None],(npts,N))\n #-- calculate interpolated fields by inverse distance weighting\n return np.sum(w*z[indices],axis=1)\n\n#-- PURPOSE interpolate sea level anomalies to lat/lon and then to time\ndef interpolate_sea_level(base_dir, xi, yi, CJD, HEM):\n #-- EPSG projections for converting lat/lon to polar stereographic\n EPSG = dict(N=3413,S=3031)\n #-- pyproj transformer for converting to polar stereographic\n crs1 = pyproj.CRS.from_string('epsg:4326')\n crs2 = pyproj.CRS.from_string(EPSG[HEM])\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n\n #-- interpolate mean dynamic topography\n input_file = 'mdt_cnes_cls2013_global.nc.gz'\n #-- read bytes from compressed file\n fd = gzip.open(os.path.join(base_dir,input_file),'rb')\n #-- dictionary with input fields\n dinput = {}\n #-- read netCDF file for mean dynamic topography\n with netCDF4.Dataset('mdt', mode='r', memory=fd.read()) as fileID:\n dinput['lon'] = fileID['lon'][:].copy()\n dinput['lat'] = fileID['lat'][:].copy()\n dinput['mdt'] = 
np.ma.array(fileID['mdt'][0,:,:].copy(),\n fill_value=fileID['mdt']._FillValue)\n dinput['mdt'].mask = (dinput['mdt'].data == dinput['mdt'].fill_value)\n #-- close the compressed file objects\n fd.close()\n #-- create 2-D grid coordinates from longitude and latitude vectors\n gridlon,gridlat = np.meshgrid(dinput['lon'],dinput['lat'])\n #-- convert from latitude/longitude into polar stereographic\n xg,yg = transformer.transform(gridlon,gridlat)\n\n #-- reduce to local coordinates to improve computational time\n gridmask = np.logical_not(dinput['mdt'].mask)\n if (HEM.upper() == 'N'):\n gridmask &= (gridlat >= 50.0)\n elif (HEM.upper() == 'S'):\n gridmask &= (gridlat <= -50.0)\n indy,indx = np.nonzero(gridmask)\n #-- calculate mean dynamic topography by inverse distance weighting\n MDT = inverse_distance(xg[indy,indx], yg[indy,indx],\n dinput['mdt'].data[indy,indx], xi, yi)\n\n #-- CNES Julian Days before and after measurement\n CJD1 = np.floor(CJD)\n #-- scale for linearly interpolating to date\n dt = (CJD - CJD1[0])\n #-- output sea level anomaly and absolute dynamic topography\n SLA = np.zeros_like(CJD)\n ADT = np.zeros_like(CJD)\n #-- for the range of dates\n for day in range(2):\n #-- convert from CNES Julians Days to calendar dates for time\n JD1 = CJD1 + day + 2433282.5\n YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(JD1[0],\n FORMAT='tuple', ASTYPE=int)\n #-- sea level directory\n ddir = os.path.join(base_dir, '{0:0.0f}'.format(YY))\n #-- input file for day before the measurement\n regex = re.compile(('dt_global_allsat_phy_l4_{0:4d}{1:02d}{2:02d}_'\n '(\\d{{4}})(\\d{{2}})(\\d{{2}}).nc.gz').format(YY,MM,DD))\n input_file, = [fi for fi in os.listdir(ddir) if regex.match(fi)]\n #-- dictionary with input fields\n dinput = {}\n #-- read bytes from compressed file\n fd = gzip.open(os.path.join(ddir,input_file),'rb')\n #-- read netCDF file for time\n with netCDF4.Dataset('sla', mode='r', memory=fd.read()) as fileID:\n dinput['lon'] = 
fileID['lon'][:].copy()\n dinput['lat'] = fileID['lat'][:].copy()\n dinput['sla'] = np.ma.array(fileID['sla'][0,:,:].copy(),\n fill_value=fileID['sla']._FillValue)\n dinput['adt'] = np.ma.array(fileID['adt'][0,:,:].copy(),\n fill_value=fileID['adt']._FillValue)\n #-- close the compressed file objects\n fd.close()\n #-- for each variable to interpolate\n out = {}\n for var in ['sla','adt']:\n #-- reduce to local coordinates to improve computational time\n gridmask = np.logical_not(dinput[var].mask)\n if (HEM.upper() == 'N'):\n gridmask &= (gridlat >= 50.0)\n elif (HEM.upper() == 'S'):\n gridmask &= (gridlat <= -50.0)\n indy,indx = np.nonzero(gridmask)\n #-- calculate variable by inverse distance weighting\n out[var] = inverse_distance(xg[indy,indx], yg[indy,indx],\n dinput[var].data[indy,indx], xi, yi)\n #-- linearly interpolate to date for iteration\n SLA += out['sla']*(2.0*dt*day - dt - day + 1.0)\n ADT += out['adt']*(2.0*dt*day - dt - day + 1.0)\n #-- return interpolated values\n return dict(h_mdt=MDT,h_sla=SLA,h_adt=ADT)\n\n#-- PURPOSE: read ICESat-2 sea ice height (ATL07) from NSIDC\n#-- interpolate AVISO sea level at points and times\ndef interp_sea_level_ICESat2(base_dir, FILE, VERBOSE=False, MODE=0o775):\n\n #-- read data from input_file\n print('{0} -->'.format(os.path.basename(FILE))) if VERBOSE else None\n IS2_atl07_mds,IS2_atl07_attrs,IS2_atl07_beams = read_HDF5_ATL07(FILE,\n ATTRIBUTES=True)\n DIRECTORY = os.path.dirname(FILE)\n #-- extract parameters from ICESat-2 ATLAS HDF5 sea ice file name\n rx = re.compile(r'(processed_)?(ATL\\d{2})-(\\d{2})_(\\d{4})(\\d{2})(\\d{2})'\n r'(\\d{2})(\\d{2})(\\d{2})_(\\d{4})(\\d{2})(\\d{2})_(\\d{3})_(\\d{2})(.*?).h5$')\n SUB,PRD,HMN,YY,MM,DD,HH,MN,SS,TRK,CYCL,SN,RL,VERS,AUX=rx.findall(FILE).pop()\n #-- set the hemisphere flag based on ICESat-2 granule\n HEM = set_hemisphere(HMN)\n\n #-- HDF5 file attributes\n attrib = {}\n #-- mean dynamic topography\n attrib['h_mdt'] = {}\n attrib['h_mdt']['long_name'] = 'Mean Dynamic 
Topography'\n attrib['h_mdt']['description'] = 'Sea surface height above geoid'\n attrib['h_mdt']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n #-- sea level anomalies\n attrib['h_sla'] = {}\n attrib['h_sla']['long_name'] = 'Sea Level Anomaly'\n attrib['h_sla']['description'] = 'Sea surface anomalies'\n attrib['h_sla']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n #-- absolute dynamic topography\n attrib['h_adt'] = {}\n attrib['h_adt']['long_name'] = 'Absolute Dynamic Topography'\n attrib['h_adt']['description'] = ('Sea surface height above geoid calculated '\n 'by adding the mean dynamic topography to the sea level anomalies')\n attrib['h_adt']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n\n #-- EPSG projections for converting lat/lon to polar stereographic\n EPSG = dict(N=3413,S=3031)\n #-- pyproj transformer for converting to polar stereographic\n crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(EPSG[HEM]))\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n\n #-- number of GPS seconds between the GPS epoch\n #-- and ATLAS Standard Data Product (SDP) epoch\n atlas_sdp_gps_epoch = IS2_atl07_mds['ancillary_data']['atlas_sdp_gps_epoch']\n\n #-- copy variables for outputting to HDF5 file\n IS2_atl07_corr = {}\n IS2_atl07_fill = {}\n IS2_atl07_dims = {}\n IS2_atl07_corr_attrs = {}\n #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)\n #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)\n #-- Add this value to delta time parameters to compute full gps_seconds\n IS2_atl07_corr['ancillary_data'] = {}\n IS2_atl07_corr_attrs['ancillary_data'] = {}\n for key in ['atlas_sdp_gps_epoch']:\n #-- get each HDF5 variable\n 
IS2_atl07_corr['ancillary_data'][key] = IS2_atl07_mds['ancillary_data'][key]\n #-- Getting attributes of group and included variables\n IS2_atl07_corr_attrs['ancillary_data'][key] = {}\n for att_name,att_val in IS2_atl07_attrs['ancillary_data'][key].items():\n IS2_atl07_corr_attrs['ancillary_data'][key][att_name] = att_val\n #-- for each input beam within the file\n for gtx in sorted(IS2_atl07_beams):\n #-- output data dictionaries for beam\n IS2_atl07_corr[gtx] = dict(sea_ice_segments={})\n IS2_atl07_fill[gtx] = dict(sea_ice_segments={})\n IS2_atl07_dims[gtx] = dict(sea_ice_segments={})\n IS2_atl07_corr_attrs[gtx] = dict(sea_ice_segments={})\n\n #-- number of segments\n val = IS2_atl07_mds[gtx]['sea_ice_segments']\n n_seg = len(val['height_segment_id'])\n\n #-- convert time from ATLAS SDP to CNES JD\n #-- days relative to 1950-01-01T00:00:00\n gps_seconds = atlas_sdp_gps_epoch + val['delta_time']\n leap_seconds = icesat2_toolkit.time.count_leap_seconds(gps_seconds)\n cnes_time = icesat2_toolkit.time.convert_delta_time(gps_seconds-leap_seconds,\n epoch1=(1980,1,6,0,0,0), epoch2=(1950,1,1,0,0,0), scale=1.0/86400.0)\n\n #-- extract lat/lon and convert to polar stereographic\n X,Y = transformer.transform(val['longitude'],val['latitude'])\n\n #-- interpolate sea level anomalies and dynamic topographies\n interp = interpolate_sea_level(base_dir,X,Y,cnes_time,HEM)\n\n #-- group attributes for beam\n IS2_atl07_corr_attrs[gtx]['Description'] = IS2_atl07_attrs[gtx]['Description']\n IS2_atl07_corr_attrs[gtx]['atlas_pce'] = IS2_atl07_attrs[gtx]['atlas_pce']\n IS2_atl07_corr_attrs[gtx]['atlas_beam_type'] = IS2_atl07_attrs[gtx]['atlas_beam_type']\n IS2_atl07_corr_attrs[gtx]['groundtrack_id'] = IS2_atl07_attrs[gtx]['groundtrack_id']\n IS2_atl07_corr_attrs[gtx]['atmosphere_profile'] = IS2_atl07_attrs[gtx]['atmosphere_profile']\n IS2_atl07_corr_attrs[gtx]['atlas_spot_number'] = IS2_atl07_attrs[gtx]['atlas_spot_number']\n IS2_atl07_corr_attrs[gtx]['sc_orientation'] = 
IS2_atl07_attrs[gtx]['sc_orientation']\n #-- group attributes for sea_ice_segments\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['Description'] = (\"Top group for sea \"\n \"ice segments as computed by the ATBD algorithm.\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['data_rate'] = (\"Data within this \"\n \"group are stored at the variable segment rate.\")\n\n #-- geolocation, time and segment ID\n #-- delta time\n IS2_atl07_corr[gtx]['sea_ice_segments']['delta_time'] = val['delta_time'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['delta_time'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['delta_time'] = None\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['units'] = \"seconds since 2018-01-01\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['long_name'] = \"Elapsed GPS seconds\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['standard_name'] = \"time\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['source'] = \"telemetry\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['calendar'] = \"standard\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['description'] = (\"Number of \"\n \"GPS seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch \"\n \"offset is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS \"\n \"seconds between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP \"\n \"epoch. 
By adding the offset contained within atlas_sdp_gps_epoch to delta time \"\n \"parameters, the time in gps_seconds relative to the GPS epoch can be computed.\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['coordinates'] = \\\n \"height_segment_id latitude longitude\"\n #-- latitude\n IS2_atl07_corr[gtx]['sea_ice_segments']['latitude'] = val['latitude'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['latitude'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['latitude'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['units'] = \"degrees_north\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['long_name'] = \"Latitude\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['standard_name'] = \"latitude\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['description'] = (\"Latitude of \"\n \"segment center\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['valid_min'] = -90.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['valid_max'] = 90.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['coordinates'] = \\\n \"height_segment_id delta_time longitude\"\n #-- longitude\n IS2_atl07_corr[gtx]['sea_ice_segments']['longitude'] = val['longitude'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['longitude'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['longitude'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['units'] = \"degrees_east\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['long_name'] = \"Longitude\"\n 
IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['standard_name'] = \"longitude\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['description'] = (\"Longitude of \"\n \"segment center\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['valid_min'] = -180.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['valid_max'] = 180.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['coordinates'] = \\\n \"height_segment_id delta_time latitude\"\n #-- segment ID\n IS2_atl07_corr[gtx]['sea_ice_segments']['height_segment_id'] = val['height_segment_id']\n IS2_atl07_fill[gtx]['sea_ice_segments']['height_segment_id'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['height_segment_id'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['units'] = \"1\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['long_name'] = \\\n \"Identifier of each height segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['description'] = \\\n \"Identifier of each height segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['coordinates'] = \\\n \"delta_time latitude longitude\"\n #-- geolocation segment beginning\n IS2_atl07_corr[gtx]['sea_ice_segments']['geoseg_beg'] = val['geoseg_beg'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['geoseg_beg'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['geoseg_beg'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['units'] = \"1\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['long_name'] = 
\"Beginning GEOSEG\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['description'] = \\\n \"Geolocation segment (geoseg) ID associated with the first photon used in this sea ice segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['coordinates'] = \\\n \"height_segment_id delta_time latitude longitude\"\n #-- geolocation segment ending\n IS2_atl07_corr[gtx]['sea_ice_segments']['geoseg_end'] = val['geoseg_end'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['geoseg_end'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['geoseg_end'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['units'] = \"1\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['long_name'] = \"Ending GEOSEG\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['description'] = \\\n \"Geolocation segment (geoseg) ID associated with the last photon used in this sea ice segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['coordinates'] = \\\n \"height_segment_id delta_time latitude longitude\"\n #-- along track distance\n IS2_atl07_corr[gtx]['sea_ice_segments']['seg_dist_x'] = val['seg_dist_x'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['seg_dist_x'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['seg_dist_x'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['units'] = \"meters\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['long_name'] = \"Along track distance\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['description'] = \\\n \"Along-track distance from the equator crossing to the 
segment center.\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['coordinates'] = \\\n \"height_segment_id delta_time latitude longitude\"\n\n #-- geophysical variables\n IS2_atl07_corr[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_fill[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_dims[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical']['Description'] = (\"Contains geophysical \"\n \"parameters and corrections used to correct photon heights for geophysical effects, such as tides.\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical']['data_rate'] = (\"Data within this group \"\n \"are stored at the sea_ice_height segment rate.\")\n\n #-- interpolated sea level products\n for key,val in interp.items():\n #-- copy output variables\n sea_level = np.ma.zeros((n_seg))\n sea_level.data[:] = np.copy(val)\n #-- replace nan values with fill value\n sea_level.mask = np.isnan(sea_level.data)\n sea_level.data[sea_level.mask] = sea_level.fill_value\n #-- add to output\n IS2_atl07_corr[gtx]['sea_ice_segments']['geophysical'][key] = sea_level.copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['geophysical'][key] = sea_level.fill_value\n IS2_atl07_dims[gtx]['sea_ice_segments']['geophysical'][key] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['units'] = \"meters\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['long_name'] = attrib[key]['long_name']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['description'] = attrib[key]['description']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['source'] = 'AVISO/Copernicus'\n 
IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['reference'] = attrib[key]['reference']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['coordinates'] = \\\n \"../height_segment_id ../delta_time ../latitude ../longitude\"\n\n #-- output HDF5 files with interpolated sea level data\n fargs = (PRD,HEM,'AVISO_SEA_LEVEL',YY,MM,DD,HH,MN,SS,TRK,CYCL,SN,RL,VERS,AUX)\n file_format = '{0}-{1}_{2}_{3}{4}{5}{6}{7}{8}_{9}{10}{11}_{12}_{13}{14}.h5'\n output_file = os.path.join(DIRECTORY,file_format.format(*fargs))\n #-- print file information\n print('\\t{0}'.format(output_file)) if VERBOSE else None\n HDF5_ATL07_corr_write(IS2_atl07_corr, IS2_atl07_corr_attrs,\n CLOBBER=True, INPUT=os.path.basename(FILE),\n FILL_VALUE=IS2_atl07_fill, DIMENSIONS=IS2_atl07_dims,\n FILENAME=output_file)\n #-- change the permissions mode\n os.chmod(output_file, MODE)\n\n#-- PURPOSE: outputting the correction values for ICESat-2 data to HDF5\ndef HDF5_ATL07_corr_write(IS2_atl07_corr, IS2_atl07_attrs, INPUT=None,\n FILENAME='', FILL_VALUE=None, DIMENSIONS=None, CLOBBER=False):\n #-- setting HDF5 clobber attribute\n if CLOBBER:\n clobber = 'w'\n else:\n clobber = 'w-'\n\n #-- open output HDF5 file\n fileID = h5py.File(os.path.expanduser(FILENAME), clobber)\n\n #-- create HDF5 records\n h5 = {}\n\n #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)\n #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)\n h5['ancillary_data'] = {}\n for k,v in IS2_atl07_corr['ancillary_data'].items():\n #-- Defining the HDF5 dataset variables\n val = 'ancillary_data/{0}'.format(k)\n h5['ancillary_data'][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- add HDF5 variable attributes\n for att_name,att_val in IS2_atl07_attrs['ancillary_data'][k].items():\n h5['ancillary_data'][k].attrs[att_name] = att_val\n\n #-- write each output beam\n beams = [k for k in IS2_atl07_corr.keys() if 
bool(re.match(r'gt\\d[lr]',k))]\n for gtx in beams:\n fileID.create_group(gtx)\n #-- add HDF5 group attributes for beam\n for att_name in ['Description','atlas_pce','atlas_beam_type',\n 'groundtrack_id','atmosphere_profile','atlas_spot_number',\n 'sc_orientation']:\n fileID[gtx].attrs[att_name] = IS2_atl07_attrs[gtx][att_name]\n #-- create sea_ice_segments group\n fileID[gtx].create_group('sea_ice_segments')\n h5[gtx] = dict(sea_ice_segments={})\n for att_name in ['Description','data_rate']:\n att_val = IS2_atl07_attrs[gtx]['sea_ice_segments'][att_name]\n fileID[gtx]['sea_ice_segments'].attrs[att_name] = att_val\n\n #-- delta_time, geolocation and segment identification variables\n for k in ['delta_time','latitude','longitude','height_segment_id',\n 'geoseg_beg','geoseg_end','seg_dist_x']:\n #-- values and attributes\n v = IS2_atl07_corr[gtx]['sea_ice_segments'][k]\n attrs = IS2_atl07_attrs[gtx]['sea_ice_segments'][k]\n fillvalue = FILL_VALUE[gtx]['sea_ice_segments'][k]\n #-- Defining the HDF5 dataset variables\n val = '{0}/{1}/{2}'.format(gtx,'sea_ice_segments',k)\n if fillvalue:\n h5[gtx]['sea_ice_segments'][k] = fileID.create_dataset(val,\n np.shape(v), data=v, dtype=v.dtype, fillvalue=fillvalue,\n compression='gzip')\n else:\n h5[gtx]['sea_ice_segments'][k] = fileID.create_dataset(val,\n np.shape(v), data=v, dtype=v.dtype, compression='gzip')\n #-- create or attach dimensions for HDF5 variable\n if DIMENSIONS[gtx]['sea_ice_segments'][k]:\n #-- attach dimensions\n for i,dim in enumerate(DIMENSIONS[gtx]['sea_ice_segments'][k]):\n h5[gtx]['sea_ice_segments'][k].dims[i].attach_scale(\n h5[gtx]['sea_ice_segments'][dim])\n else:\n #-- make dimension\n h5[gtx]['sea_ice_segments'][k].make_scale(k)\n #-- add HDF5 variable attributes\n for att_name,att_val in attrs.items():\n h5[gtx]['sea_ice_segments'][k].attrs[att_name] = att_val\n\n #-- add to geophysical corrections\n key = 'geophysical'\n fileID[gtx]['sea_ice_segments'].create_group(key)\n 
h5[gtx]['sea_ice_segments'][key] = {}\n for att_name in ['Description','data_rate']:\n att_val=IS2_atl07_attrs[gtx]['sea_ice_segments'][key][att_name]\n fileID[gtx]['sea_ice_segments'][key].attrs[att_name] = att_val\n for k,v in IS2_atl07_corr[gtx]['sea_ice_segments'][key].items():\n #-- attributes\n attrs = IS2_atl07_attrs[gtx]['sea_ice_segments'][key][k]\n fillvalue = FILL_VALUE[gtx]['sea_ice_segments'][key][k]\n #-- Defining the HDF5 dataset variables\n val = '{0}/{1}/{2}/{3}'.format(gtx,'sea_ice_segments',key,k)\n if fillvalue:\n h5[gtx]['sea_ice_segments'][key][k] = \\\n fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, fillvalue=fillvalue, compression='gzip')\n else:\n h5[gtx]['sea_ice_segments'][key][k] = \\\n fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- attach dimensions\n for i,dim in enumerate(DIMENSIONS[gtx]['sea_ice_segments'][key][k]):\n h5[gtx]['sea_ice_segments'][key][k].dims[i].attach_scale(\n h5[gtx]['sea_ice_segments'][dim])\n #-- add HDF5 variable attributes\n for att_name,att_val in attrs.items():\n h5[gtx]['sea_ice_segments'][key][k].attrs[att_name] = att_val\n\n #-- HDF5 file title\n fileID.attrs['featureType'] = 'trajectory'\n fileID.attrs['title'] = 'ATLAS/ICESat-2 L3A Sea Ice Height'\n fileID.attrs['summary'] = ('Estimates of the sea ice correction parameters '\n 'needed to interpret and assess the quality of sea height estimates.')\n fileID.attrs['description'] = ('The data set (ATL07) contains along-track '\n 'heights for sea ice and open water leads (at varying length scales) '\n 'relative to the WGS84 ellipsoid (ITRF2014 reference frame) after '\n 'adjustment for geoidal and tidal variations, and inverted barometer '\n 'effects.')\n date_created = datetime.datetime.today()\n fileID.attrs['date_created'] = date_created.isoformat()\n project = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'\n fileID.attrs['project'] = project\n platform = 'ICESat-2 > Ice, Cloud, and land 
Elevation Satellite-2'\n fileID.attrs['project'] = platform\n #-- add attribute for elevation instrument and designated processing level\n instrument = 'ATLAS > Advanced Topographic Laser Altimeter System'\n fileID.attrs['instrument'] = instrument\n fileID.attrs['source'] = 'Spacecraft'\n fileID.attrs['references'] = 'https://nsidc.org/data/icesat-2'\n fileID.attrs['processing_level'] = '4'\n #-- add attributes for input ATL07 file\n fileID.attrs['input_files'] = os.path.basename(INPUT)\n #-- find geospatial and temporal ranges\n lnmn,lnmx,ltmn,ltmx,tmn,tmx = (np.inf,-np.inf,np.inf,-np.inf,np.inf,-np.inf)\n for gtx in beams:\n lon = IS2_atl07_corr[gtx]['sea_ice_segments']['longitude']\n lat = IS2_atl07_corr[gtx]['sea_ice_segments']['latitude']\n delta_time = IS2_atl07_corr[gtx]['sea_ice_segments']['delta_time']\n #-- setting the geospatial and temporal ranges\n lnmn = lon.min() if (lon.min() < lnmn) else lnmn\n lnmx = lon.max() if (lon.max() > lnmx) else lnmx\n ltmn = lat.min() if (lat.min() < ltmn) else ltmn\n ltmx = lat.max() if (lat.max() > ltmx) else ltmx\n tmn = delta_time.min() if (delta_time.min() < tmn) else tmn\n tmx = delta_time.max() if (delta_time.max() > tmx) else tmx\n #-- add geospatial and temporal attributes\n fileID.attrs['geospatial_lat_min'] = ltmn\n fileID.attrs['geospatial_lat_max'] = ltmx\n fileID.attrs['geospatial_lon_min'] = lnmn\n fileID.attrs['geospatial_lon_max'] = lnmx\n fileID.attrs['geospatial_lat_units'] = \"degrees_north\"\n fileID.attrs['geospatial_lon_units'] = \"degrees_east\"\n fileID.attrs['geospatial_ellipsoid'] = \"WGS84\"\n fileID.attrs['date_type'] = 'UTC'\n fileID.attrs['time_type'] = 'CCSDS UTC-A'\n #-- convert start and end time from ATLAS SDP seconds into GPS seconds\n atlas_sdp_gps_epoch=IS2_atl07_corr['ancillary_data']['atlas_sdp_gps_epoch']\n gps_seconds = atlas_sdp_gps_epoch + np.array([tmn,tmx])\n #-- calculate leap seconds\n leaps = icesat2_toolkit.time.count_leap_seconds(gps_seconds)\n #-- convert from seconds 
since 1980-01-06T00:00:00 to Modified Julian days\n MJD = icesat2_toolkit.time.convert_delta_time(gps_seconds - leaps,\n epoch1=(1980,1,6,0,0,0), epoch2=(1858,11,17,0,0,0), scale=1.0/86400.0)\n #-- convert to calendar date\n YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(MJD + 2400000.5,\n FORMAT='tuple')\n #-- add attributes with measurement date start, end and duration\n tcs = datetime.datetime(int(YY[0]), int(MM[0]), int(DD[0]),\n int(HH[0]), int(MN[0]), int(SS[0]), int(1e6*(SS[0] % 1)))\n fileID.attrs['time_coverage_start'] = tcs.isoformat()\n tce = datetime.datetime(int(YY[1]), int(MM[1]), int(DD[1]),\n int(HH[1]), int(MN[1]), int(SS[1]), int(1e6*(SS[1] % 1)))\n fileID.attrs['time_coverage_end'] = tce.isoformat()\n fileID.attrs['time_coverage_duration'] = '{0:0.0f}'.format(tmx-tmn)\n #-- Closing the HDF5 file\n fileID.close()\n\n#-- Main program that calls interp_sea_level_ICESat2()\ndef main():\n #-- Read the system arguments listed after the program\n parser = argparse.ArgumentParser(\n description=\"\"\"Interpolates AVISO sea level anomalies, absolute\n dynamic topography and mean dynamic topography to ICESat-2\n ATL07 sea ice height data\n \"\"\"\n )\n #-- command line parameters\n parser.add_argument('infile',\n type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='+',\n help='ICESat-2 ATL07 file to run')\n #-- directory with sea level data\n parser.add_argument('--directory','-D',\n type=lambda p: os.path.abspath(os.path.expanduser(p)),\n default=os.getcwd(),\n help='Working data directory')\n #-- verbosity settings\n #-- verbose will output information about each output file\n parser.add_argument('--verbose','-V',\n default=False, action='store_true',\n help='Output information about each created file')\n #-- permissions mode of the local files (number in octal)\n parser.add_argument('--mode','-M',\n type=lambda x: int(x,base=8), default=0o775,\n help='Permission mode of directories and files created')\n args = parser.parse_args()\n\n 
#-- run for each input ATL07 file\n for FILE in args.infile:\n interp_sea_level_ICESat2(args.directory, FILE,\n VERBOSE=args.verbose, MODE=args.mode)\n\n#-- run main program\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.sum", "numpy.zeros_like", "numpy.array", "numpy.floor", "numpy.copy", "numpy.logical_not", "numpy.shape", "numpy.isnan", "numpy.broadcast_to", "numpy.meshgrid", "numpy.nonzero", "numpy.ma.zeros" ] ]
FDU-VTS/Person-Search
[ "36a1eab8d8fdf149e32dece030edff02dbc8a915" ]
[ "models/reid.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: [email protected]\n\"\"\"\n\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.utils import model_zoo\n\nfrom models.context_block import *\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\nmodel_layers = {\n 'resnet50': [3, 4, 6, 3],\n 'resnet101': [3, 4, 23, 3]\n}\n\n__all__ = ['ResNet', 'Bottleneck']\n\n\nclass IBN(nn.Module):\n \"\"\"\n IBN with BN:IN = 7:1\n \"\"\"\n\n def __init__(self, planes):\n super(IBN, self).__init__()\n half1 = int(planes / 8)\n self.half = half1\n half2 = planes - half1\n self.IN = nn.InstanceNorm2d(half1, affine=True)\n self.BN = nn.BatchNorm2d(half2)\n\n def forward(self, x):\n split = torch.split(x, self.half, dim=1)\n out1 = self.IN(split[0].contiguous())\n out2 = self.BN(torch.cat(split[1:], dim=1).contiguous())\n out = torch.cat((out1, out2), 1)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, with_ibn=False, gcb=None, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.with_gcb = gcb is not None\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n\n if with_ibn:\n self.bn1 = IBN(planes)\n else:\n self.bn1 = nn.BatchNorm2d(planes)\n\n self.conv2 = nn.Conv2d(planes, 
planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n # GCNet\n if self.with_gcb:\n gcb_inplanes = planes * self.expansion\n self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.with_gcb:\n out = self.context_block(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, last_stride, with_ibn, gcb, stage_with_gcb, block, layers):\n scale = 64\n self.inplanes = scale\n super().__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, scale, layers[0], with_ibn=with_ibn,\n gcb=gcb if stage_with_gcb[0] else None)\n self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2, with_ibn=with_ibn,\n gcb=gcb if stage_with_gcb[1] else None)\n self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2, with_ibn=with_ibn,\n gcb=gcb if stage_with_gcb[2] else None)\n self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=last_stride,\n gcb=gcb if stage_with_gcb[3] else None)\n\n def _make_layer(self, block, planes, blocks, stride=1, with_ibn=False, gcb=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n 
kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n if planes == 512:\n with_ibn = False\n layers.append(block(self.inplanes, planes, with_ibn, gcb, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, with_ibn, gcb))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n def load_pretrain(self, model_path=''):\n with_model_path = (model_path is not '')\n if not with_model_path: # resnet pretrain\n state_dict = model_zoo.load_url(model_urls[self._model_name])\n state_dict.pop('fc.weight')\n state_dict.pop('fc.bias')\n self.load_state_dict(state_dict)\n else:\n # ibn pretrain\n state_dict = torch.load(model_path)['state_dict']\n state_dict.pop('module.fc.weight')\n state_dict.pop('module.fc.bias')\n new_state_dict = {}\n for k in state_dict:\n new_k = '.'.join(k.split('.')[1:]) # remove module in name\n if self.state_dict()[new_k].shape == state_dict[k].shape:\n new_state_dict[new_k] = state_dict[k]\n state_dict = new_state_dict\n self.load_state_dict(state_dict, strict=False)\n\n def random_init(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n @classmethod\n def from_name(cls, model_name, last_stride, with_ibn, gcb, stage_with_gcb):\n cls._model_name = model_name\n return ResNet(last_stride, with_ibn, gcb, stage_with_gcb, block=Bottleneck, layers=model_layers[model_name])\n\n\n\nclass Baseline(nn.Module):\n in_planes = 2048\n\n def __init__(self,\n backbone,\n num_classes,\n last_stride,\n with_ibn,\n gcb,\n stage_with_gcb,\n pretrain=True,\n model_path=''):\n super().__init__()\n try:\n self.base = ResNet.from_name(backbone, last_stride, with_ibn, gcb, stage_with_gcb)\n except:\n print(f'not support {backbone} backbone')\n\n if pretrain:\n self.base.load_pretrain(model_path)\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.num_classes = num_classes\n\n self.bottleneck = nn.BatchNorm1d(self.in_planes)\n self.bottleneck.bias.requires_grad_(False) # no shift\n\n self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)\n\n\n def forward(self, x, label=None):\n base = self.base(x)\n global_feat = self.gap(base) # (b, 2048, 1, 1)\n global_feat = global_feat.view(-1, global_feat.size()[1])\n feat = self.bottleneck(global_feat) # normalize for angular softmax\n return feat, torch.sum(base*feat.unsqueeze(-1).unsqueeze(-1), dim=1)\n\n def load_params_wo_fc(self, state_dict):\n # new_state_dict = {}\n # for k, v in state_dict.items():\n # k = '.'.join(k.split('.')[1:])\n # new_state_dict[k] = v\n # state_dict = new_state_dict\n state_dict.pop('classifier.weight')\n res = self.load_state_dict(state_dict, strict=False)\n assert str(res.missing_keys) == str(['classifier.weight',]), 'issue loading pretrained weights'\nif __name__ == \"__main__\":\n model = Baseline(\n 'resnet50',\n 1453,\n 1,\n True,\n \"ratio\",\n (False, False, False, False),\n pretrain = False,\n model_path = '')\n print(model)" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.load", "torch.split", "torch.nn.BatchNorm1d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.nn.Sequential", "torch.utils.model_zoo.load_url", "torch.cat" ] ]
MaximeSorgenfrei/cat_dog_cnn
[ "bc1301fb683de2111db2c25b9da22608ede8e070" ]
[ "webcam_animal_classifier.py" ]
[ "import cv2\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.callbacks import EarlyStopping\nfrom keras.optimizers import Adam\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nfrom random import shuffle\n# import tensorflow as tf\nimport time\n\nfile_list = os.listdir(\"./\")\nkeras_model_files = []\nfor file in file_list:\n if file.split(\".\")[-1] in [\"h5\",\"npy\"]:\n print(file)\n keras_model_files.append(file)\n\n# load model from file\nkeras_model_file_i_want_to_use = keras_model_files[0]\nmodel = keras.models.load_model(keras_model_file_i_want_to_use)\nmodel.summary()\n# classes = [\"ape\", \"bear\", \"bee\", \"beetle\", \"bird\", \"bos\", \"canine\", \"deer\", \"elephants\", \"feline\", \"frogs\", \"gekko\", \"golden moles\", \"hare\", \"human\", \"lemur\", \"loris\", \"none\", \"rodent\", \"salamander\", \"scorpions\", \"shark\", \"sheep\", \"snake\", \"spider\", \"squirrel\", \"turtle\", \"whale\"]\n# read directories, resize and label data\n# Write some Text\n\n# dict\nwith open(keras_model_files[1],\"r\") as f:\n class_list = json.load(f)\n class_stats = pd.DataFrame(data={\"classes\":class_list})\n classes = class_stats[\"classes\"].to_dict()\nf.close()\nprint(\"Classes: {}\".format(classes))\nprint(\"Using following model file for predictions:\\n{}\".format(keras_model_file_i_want_to_use))\n\nfont = cv2.FONT_HERSHEY_COMPLEX\nbottomLeftCornerOfText = (50,50)\nbottomLeftCornerOfText2 = (50,75)\nfontScale = 0.5\nfontColor = (255,255,255)\nlineType = 2\n\nwidth, height = 50, 50\ncap_width = 1280\ncap_height = 720\nroi_width = 400\nroi_height = 300\n\nWebCam_cap = cv2.VideoCapture(0)\nWebCam_cap.set(cv2.CAP_PROP_FRAME_WIDTH, cap_width)\nWebCam_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cap_height)\n\nSETTING_PHOTOFRAME = True\n\nwhile True:\n # get frame\n ret, frame = WebCam_cap.read()\n # print(type(frame), frame.shape)\n try:\n # reduce frame to 50x50 pixles\n # image = cv2.imread(frame, 
cv2.IMREAD_GRAYSCALE)\n\n if SETTING_PHOTOFRAME:\n roi = np.ones_like(frame)\n roi[int((cap_height-roi_height)/2):-int((cap_height-roi_height)/2), int((cap_width-roi_width)/2):-int((cap_width-roi_width)/2), :] = frame[int((cap_height-roi_height)/2):-int((cap_height-roi_height)/2), int((cap_width-roi_width)/2):-int((cap_width-roi_width)/2), :]\n image = frame[int((cap_height-roi_height)/2):-int((cap_height-roi_height)/2), int((cap_width-roi_width)/2):-int((cap_width-roi_width)/2), :]\n # print(\"image shape: \",image.shape)\n else:\n image = frame\n # resize, turn to gray and reshape for CNN\n image = cv2.resize(image, (height, width), interpolation=cv2.INTER_AREA)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image_to_predict = np.reshape(image, (1, height, width, 1))\n # predict with NN\n pred = model.predict_classes(image_to_predict, verbose=0)\n pred_ = model.predict(image_to_predict, verbose=0)\n prediction = \"{}: {} | {}: {}\".format(classes[0], pred_[0][0], classes[1], pred_[0][1])\n if pred_[0][pred[0]] > 0.30:\n prediction_class = \"Predicted class: {} [{:.2f}]\".format(classes[pred[0]], pred_[0][pred[0]])\n else:\n prediction_class = \"No significant prediction possible!\"\n # print prediction and class to frame\n # cv2.putText(frame, prediction, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)\n if SETTING_PHOTOFRAME:\n cv2.putText(roi, prediction_class, bottomLeftCornerOfText2, font, fontScale, fontColor, lineType)\n else:\n cv2.putText(frame, prediction_class, bottomLeftCornerOfText2, font, fontScale, fontColor, lineType)\n # ax[i].set_title(\"{}: {}-{} ({})\".format(i, pred, classes[pred[0]], np.round(pred_, decimals=4)))\n # display resut\n# cv2.namedWindow(\"Result\", cv2.WINDOW_AUTOSIZE)\n# cv2.imshow(\"Result\", image)\n except Exception as e:\n print(e)\n else:\n cv2.namedWindow(\"WebCam\", cv2.WINDOW_AUTOSIZE)\n if SETTING_PHOTOFRAME:\n cv2.imshow(\"WebCam\", roi)\n else:\n cv2.imshow(\"WebCam\", frame)\n \n if cv2.waitKey(1) 
& 0xFF==ord(\"q\"):\n break\n \nWebCam_cap.release()\ncv2.destroyAllWindows()\n" ]
[ [ "pandas.DataFrame", "numpy.ones_like", "numpy.reshape" ] ]
534ttl3/ctsutils
[ "b070bf349d4a112df576404a3948e0de60f24927", "b070bf349d4a112df576404a3948e0de60f24927" ]
[ "ctsutils/test.py", "ctsutils/mpl_slider.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ctsutils.cparameterspace import CParam, CParameterSpace\n\n\ndef foo(X, Y, Y2):\n \"\"\" \"\"\"\n return (1 - X / 2 + X ** 5 + (Y + Y2 ) ** 3) * np.exp(-X ** 2 - (Y + Y2 ) ** 2) # calcul du tableau des valeurs de Z\n\n\ndef foo(X, Y, Y2, Y3):\n \"\"\" \"\"\"\n return (1 - X / 2 + X ** 5 + (Y + Y2 + Y3) ** 3) * np.exp(-X ** 2 - (Y + Y2 + Y3) ** 2) # calcul du tableau des valeurs de Z\n\n\nps = CParameterSpace([CParam(\"x\", np.linspace(-3, 3, 51), unit=\"m\"),\n CParam(\"y\", np.linspace(-2, 2, 41)),\n CParam(\"y2\", np.linspace(-1, 1, 31)),\n CParam(\"y3\", np.linspace(-1, 1, 10))])\n\n# import pdb; pdb.set_trace() # noqa BREAKPOINT\n# x = ps.get_arr(\"x\")\n\nZ = ps.calc_function(foo, args_param_names=(\"x\", \"y\", \"y2\", \"y3\"))\n\nintegrals = ps.calc_integral(Z, \"x\")\n# import pdb; pdb.set_trace() # noqa BREAKPOINT\n\n# fig, ax = plt.subplots(1, 1)\n# ps.plot(Z, ordering_of_params_names=(\"y2\", \"y\"), ax=ax)\n# plt.show()\n\n# import pdb; pdb.set_trace() # noqa BREAKPOINT\n\nfig, ax = plt.subplots(1, 1)\n\n#ps.plot(Z, z_label=\"Z\", ordering_of_params_name_and_value=((\"y3\", None), (\"y2\", None)), ax=ax)\nps.plot(integrals, z_label=\"integrals\", ordering_of_params_name_and_value=((\"y3\", None), (\"y2\", None)), ax=ax)\n\n# ps.plot(integrals, z_label=\"integrals\", ordering_of_params_name_and_value=((\"y2\", None), (\"y\", None)), ax=ax)\n\nplt.show()\n", "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\nt = np.arange(0.0, 1.0, 0.001)\na0 = 5\nf0 = 3\ndelta_f = 5.0\ns = a0 * np.sin(2 * np.pi * f0 * t)\nl, = plt.plot(t, s, lw=2)\nax.margins(x=0)\n\naxcolor = 'lightgoldenrodyellow'\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\naxamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n\nsfreq = Slider(axfreq, 'Freq', 0.1, 30.0, 
valinit=f0, valstep=delta_f)\nsamp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)\n\n\ndef update(val):\n print(val)\n amp = samp.val\n freq = sfreq.val\n l.set_ydata(amp*np.sin(2*np.pi*freq*t))\n fig.canvas.draw_idle()\n\n\nsfreq.on_changed(update)\nsamp.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\ndef reset(event):\n sfreq.reset()\n samp.reset()\nbutton.on_clicked(reset)\n\nrax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)\nradio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)\n\n\ndef colorfunc(label):\n l.set_color(label)\n fig.canvas.draw_idle()\nradio.on_clicked(colorfunc)\n\nplt.show()\n" ]
[ [ "numpy.exp", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.subplots" ], [ "numpy.sin", "matplotlib.widgets.Button", "matplotlib.pyplot.subplots", "matplotlib.pyplot.axes", "numpy.arange", "matplotlib.widgets.RadioButtons", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.widgets.Slider" ] ]
sereini/SpeechSeparationModel
[ "ea44c845762112f3bc2e5e54c5530e6fd429464f" ]
[ "preprocessing/embedding/export_FaceEmbedding.py" ]
[ "\"\"\"\nExports the embeddings of a directory of images as numpy arrays.\nFollowing structure:\n D:\\images:\n folder1:\n img_0\n ...\n img_74\n folder2:\n img_0\n ...\n img_74\n \nOutput:\nembeddings.npy -- Embeddings as np array (with names \"folder1\", \"folder2\", etc.)\n\nUse --is_aligned False, if your images aren't already pre-aligned\nUse --image_batch to dictacte how many images to load in memory at a time.\n\n\nStarted with export_embeddings.py from Charles Jekel, and modified the program\nto export the face embeddings for the audio-visual speech separation model. The\npretrained model is from David Sandberg's facenet repository:\n https://github.com/davidsandberg/facenet\nexport_embedding.py from same project:\n https://github.com/davidsandberg/facenet/tree/master/contributed\n\n\nEnsure you have set the PYTHONPATH for the pretrained facenet (3.):\n https://github.com/davidsandberg/facenet/wiki/Validate-on-LFW\nExecution:\n python export_FaceEmbedding.py models\\20180402-114759\\20180402-114759.pb D:\\images --is_aligned False --image_size 160 --gpu_memory_fraction 0.5 --image_batch 75\n\n\nSereina Scherrer 2019\n\"\"\"\n\n# MIT License\n#\n# Copyright (c) 2016 David Sandberg\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND 
NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nfrom scipy import misc\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nimport argparse\nimport facenet\nimport align.detect_face\nimport re\nimport glob\n\nfrom six.moves import xrange\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\ndef natural_keys(text):\n return [atoi(c) for c in re.split(r'(\\d+)', text)]\n\ndef main(args):\n train_set = facenet.get_dataset(args.data_dir)\n image_list, label_list = facenet.get_image_paths_and_labels(train_set)\n \n # sort the image:s img_0 ... img_74\n image_list.sort(key=natural_keys)\n \n # fetch the classes (labels as strings) exactly as it's done in get_dataset\n path_exp = os.path.expanduser(args.data_dir)\n classes = [path for path in os.listdir(path_exp) \\\n if os.path.isdir(os.path.join(path_exp, path))]\n classes.sort()\n # get the label strings\n label_strings = [name for name in classes if \\\n os.path.isdir(os.path.join(path_exp, name))]\n\n # define path to save the embeddings\n dirs = [\"./emb/embeddings_AVspeech/\"]\n for d in dirs:\n if not os.path.exists(d):\n os.makedirs(d)\n print(\"Folder created:\", d)\n \n with tf.Graph().as_default():\n\n with tf.Session() as sess:\n\n # Load the model\n facenet.load_model(args.model_dir)\n\n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n # Run forward pass to calculate embeddings\n nrof_images 
= len(image_list)\n print('Number of images: ', nrof_images)\n batch_size = args.image_batch\n if nrof_images % batch_size == 0:\n nrof_batches = nrof_images // batch_size\n else:\n nrof_batches = (nrof_images // batch_size) + 1\n print('Number of batches: ', nrof_batches)\n embedding_size = embeddings.get_shape()[1]\n emb_array = np.zeros((nrof_images, embedding_size))\n start_time = time.time()\n\n for i in range(nrof_batches):\n if i == nrof_batches -1:\n n = nrof_images\n else:\n n = i*batch_size + batch_size\n # Get images for the batch\n if args.is_aligned is True:\n images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)\n else:\n images = load_and_align_data(image_list[i*batch_size:n], args.image_size, args.margin, args.gpu_memory_fraction)\n feed_dict = { images_placeholder: images, phase_train_placeholder:False }\n # Use the facenet model to calcualte embeddings\n embed = sess.run(embeddings, feed_dict=feed_dict)\n emb_array[i*batch_size:n, :] = embed\n \n # export the embedding\n s = dirs[0] + label_strings[i] + \".npy\" \n np.save(s, embed)\n \n print('Completed batch', i+1, 'of', nrof_batches)\n\n run_time = time.time() - start_time\n print('Run time: ', run_time)\n print('Time per video: ',run_time/nrof_batches)\n\n\n\ndef load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):\n\n\n print('Creating networks and loading parameters')\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n\n nrof_samples = len(image_paths)\n img_list = [None] * nrof_samples\n for i in xrange(nrof_samples):\n print(image_paths[i])\n img = misc.imread(os.path.expanduser(image_paths[i]))\n \n aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')\n prewhitened = 
facenet.prewhiten(aligned)\n img_list[i] = prewhitened \n \n # uncomment if you want to save the aligned images\n '''f = os.path.basename(image_paths[i])\n #print(f)\n tmp_folder = re.split(r'\\\\', image_paths[i])\n tmp_f = tmp_folder[-2]\n d = \"./aligned/\" + tmp_f + \"/\"\n if not os.path.exists(d):\n os.makedirs(d)\n print(\"Folder created:\", d)\n \n misc.imsave(d + f, aligned)'''\n \n images = np.stack(img_list)\n return images\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('model_dir', type=str,\n help='Directory containing the meta_file and ckpt_file')\n parser.add_argument('data_dir', type=str,\n help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.')\n parser.add_argument('--is_aligned', type=str,\n help='Is the data directory already aligned and cropped?', default=True)\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=160)\n parser.add_argument('--margin', type=int,\n help='Margin for the crop around the bounding box (height, width) in pixels.',\n default=44)\n parser.add_argument('--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.',\n default=1.0)\n parser.add_argument('--image_batch', type=int,\n help='Number of images stored in memory at a time. Default 75.',\n default=75)\n\n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n" ]
[ [ "numpy.save", "numpy.zeros", "scipy.misc.imresize", "tensorflow.Graph", "tensorflow.Session", "tensorflow.get_default_graph", "numpy.stack", "tensorflow.GPUOptions", "tensorflow.ConfigProto" ] ]
yoshitomo-matsubara/vision
[ "03d11338f3faf94a0749549912593ddb8b70be17" ]
[ "references/detection/utils.py" ]
[ "import datetime\nimport errno\nimport os\nimport time\nfrom collections import defaultdict, deque\n\nimport torch\nimport torch.distributed as dist\n\n\nclass SmoothedValue:\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device=\"cuda\")\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value\n )\n\n\ndef all_gather(data):\n \"\"\"\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n \"\"\"\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n data_list = [None] * world_size\n dist.all_gather_object(data_list, data)\n return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n \"\"\"\n Args:\n input_dict (dict): all the values will be reduced\n 
average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that all processes\n have the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return input_dict\n with torch.inference_mode():\n names = []\n values = []\n # sort the keys so that they are consistent across processes\n for k in sorted(input_dict.keys()):\n names.append(k)\n values.append(input_dict[k])\n values = torch.stack(values, dim=0)\n dist.all_reduce(values)\n if average:\n values /= world_size\n reduced_dict = {k: v for k, v in zip(names, values)}\n return reduced_dict\n\n\nclass MetricLogger:\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{attr}'\")\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(f\"{name}: {str(meter)}\")\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = \"\"\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt=\"{avg:.4f}\")\n data_time = SmoothedValue(fmt=\"{avg:.4f}\")\n space_fmt = \":\" + str(len(str(len(iterable)))) + \"d\"\n if torch.cuda.is_available():\n log_msg = self.delimiter.join(\n [\n header,\n \"[{0\" + space_fmt + 
\"}/{1}]\",\n \"eta: {eta}\",\n \"{meters}\",\n \"time: {time}\",\n \"data: {data}\",\n \"max mem: {memory:.0f}\",\n ]\n )\n else:\n log_msg = self.delimiter.join(\n [header, \"[{0\" + space_fmt + \"}/{1}]\", \"eta: {eta}\", \"{meters}\", \"time: {time}\", \"data: {data}\"]\n )\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n print(\n log_msg.format(\n i,\n len(iterable),\n eta=eta_string,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB,\n )\n )\n else:\n print(\n log_msg.format(\n i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)\n )\n )\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print(f\"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)\")\n\n\ndef collate_fn(batch):\n return tuple(zip(*batch))\n\n\ndef mkdir(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop(\"force\", False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not 
is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n if \"RANK\" in os.environ and \"WORLD_SIZE\" in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n args.gpu = int(os.environ[\"LOCAL_RANK\"])\n elif \"SLURM_PROCID\" in os.environ:\n args.rank = int(os.environ[\"SLURM_PROCID\"])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print(\"Not using distributed mode\")\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = \"nccl\"\n print(f\"| distributed init (rank {args.rank}): {args.dist_url}\", flush=True)\n torch.distributed.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank\n )\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)\n" ]
[ [ "torch.stack", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.inference_mode", "torch.distributed.init_process_group", "torch.distributed.is_available", "torch.tensor", "torch.save", "torch.cuda.device_count", "torch.distributed.all_gather_object", "torch.distributed.barrier", "torch.distributed.is_initialized", "torch.cuda.is_available", "torch.distributed.all_reduce", "torch.cuda.max_memory_allocated", "torch.cuda.set_device" ] ]
UBC-MDS/world-energy-visualization
[ "6f8dd756a8c158a332fd94ca1f025fc16bcd77b3" ]
[ "src/tab1_mapview.py" ]
[ "from dash import Input, Output, callback, html, dcc, State\nimport dash_bootstrap_components as dbc\n\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport plotly.io as pio\nimport plotly.graph_objects as go\n\nfrom urllib.request import urlopen\nimport json\n\ndf_all = pd.read_csv(\n \"data/Primary-energy-consumption-from-fossilfuels-nuclear-renewables.csv\"\n)\ndf_notna_wide = df_all[df_all[\"Code\"].notna()]\ndf_notna = df_notna_wide.melt(\n id_vars=[\"Entity\", \"Code\", \"Year\"],\n value_vars=[\"Fossil\", \"Renewables\", \"Nuclear\"],\n var_name=\"energy_type\",\n value_name=\"percentage\",\n).merge(df_notna_wide, on=[\"Year\", \"Code\", \"Entity\"])\n\ndf_countries = df_notna[df_notna[\"Code\"] != \"OWID_WRL\"]\ndf_world = df_notna[df_notna[\"Code\"] == \"OWID_WRL\"]\ndf_continents = df_all[df_all[\"Code\"].isna()]\n\nlist_of_continents = df_continents[\"Entity\"].unique()\nlist_of_countries = df_countries[\"Entity\"].unique()\nlist_yrs = df_all[\"Year\"].unique()\n\nproj_param = {\n \"World\": [0, 0, 1],\n \"North America\": [40, -120, 2],\n \"Europe\": [50, 20, 4],\n \"Africa\": [0, 20, 2],\n}\n\n# ==============================================================================\n# Layout for map and barchart\n# ==============================================================================\n\ntab1_plots = dbc.Col(\n [\n dbc.Row(\n [\n html.H4(\"World Consumption by Country\", style={\"width\": \"fit-content\"}),\n dbc.Col(\n [\n dbc.Button(\n id=\"map_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Drag and select the number of year to view the change of engergy consumption distribution using the slide bar. 
You can hover or zoom to get the details of a specific region.\",\n target=\"map_tooltip\",\n placement=\"bottom\",\n ),\n ]\n ),\n ],\n style={\"padding\": \"3vh 0\"},\n ),\n dcc.Graph(id=\"tab1-map\"),\n html.Div(\n dcc.Slider(\n id=\"tab1-year-slider\",\n min=list_yrs.min(),\n max=list_yrs.max(),\n step=1,\n value=list_yrs.max(),\n marks={\n int(i): str(i) for i in np.append(list_yrs[::5], [list_yrs.max()])\n },\n tooltip={\"placement\": \"top\", \"always_visible\": True},\n updatemode=\"drag\",\n ),\n style={\"padding\": \"0vh 10vw\"},\n ),\n html.Br(),\n dbc.Row(\n [\n html.H4(\n \"Top/Bottom energy consumer nations\", style={\"width\": \"fit-content\"}\n ),\n dbc.Col(\n [\n dbc.Button(\n id=\"bar_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Select the number of countries to view in the bar plot using the input tab,\"\n \"then select whether to view to the top or bottom consumers.\"\n \"Hover the bar for details.\",\n target=\"bar_tooltip\",\n placement=\"bottom\",\n ),\n ],\n style={\"padding\": \"0 0\"},\n ),\n ]\n ),\n html.Br(),\n dbc.Row(\n [\n dbc.Col(\n [\n dbc.Row(\n [\n html.H4(\n \"Number of countries\",\n style={\"font-size\": \"20px\", \"width\": \"fit-content\"},\n ),\n dbc.Col(\n [\n dbc.Button(\n id=\"topN_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Controls the number of countries to view in the barchart. 
Select upto 15 countries\",\n target=\"topN_tooltip\",\n placement=\"bottom\",\n ),\n ],\n style={\"padding\": \"0 0\"},\n ),\n ]\n ),\n html.Br(),\n dbc.Input(\n id=\"tab1-input-topN\",\n value=10,\n type=\"number\",\n debounce=True,\n required=True,\n minlength=1,\n max=15,\n min=0,\n ),\n ]\n ),\n dbc.Col(\n [\n dbc.Row(\n [\n html.H4(\n \"Ranking type\",\n style={\"font-size\": \"20px\", \"width\": \"fit-content\"},\n ),\n dbc.Col(\n [\n dbc.Button(\n id=\"top_bot_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Select whether you want to view the top or bottom consumers\",\n target=\"top_bot_tooltip\",\n placement=\"bottom\",\n ),\n ],\n style={\"padding\": \"0 0\"},\n ),\n ]\n ),\n html.Br(),\n dcc.RadioItems(\n [\"Top\", \"Bottom\"],\n value=\"Top\",\n id=\"tab1_top_bot\",\n inline=True,\n labelStyle={\n \"margin-right\": \"10px\",\n \"margin-top\": \"1px\",\n \"display\": \"inline-block\",\n \"horizontal-align\": \"\",\n },\n ),\n ],\n style={\n \"padding\": \"0 0\",\n },\n ),\n ]\n ),\n html.Br(),\n dcc.Graph(id=\"tab1-barchart\"),\n ]\n)\n\n\n# ==============================================================================\n# World Map\n# ==============================================================================\n\n\n@callback(\n Output(\"tab1-map\", \"figure\"),\n Input(\"tab1-energy-type-dropdown\", \"value\"),\n Input(\"tab1-year-slider\", \"value\"),\n Input(\"tab1-map-focus\", \"value\"),\n)\ndef display_map(energy_type, year, scope):\n \"\"\"\n Docs\n \"\"\"\n # scope = \"Africa\"\n df = df_notna.query(\"Year==@year & energy_type==@energy_type\")\n\n fig = px.choropleth(\n df,\n locations=\"Code\",\n color=\"percentage\",\n hover_name=\"Entity\",\n hover_data={\n \"Year\": True,\n \"Fossil\": True,\n \"Nuclear\": True,\n \"Renewables\": True,\n \"percentage\": False,\n \"Code\": False,\n },\n color_continuous_scale=px.colors.sequential.YlGn,\n range_color=[0, 100],\n )\n\n 
fig.update_layout(\n dragmode=\"zoom\",\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0},\n title={\n \"text\": \"Global \"\n + str(energy_type)\n + \" Energy Consumption in \"\n + str(year),\n \"x\": 0.5,\n \"xanchor\": \"center\",\n },\n )\n\n fig.update_geos(\n showcountries=True,\n center={\"lat\": proj_param[scope][0], \"lon\": proj_param[scope][1]},\n projection={\"scale\": proj_param[scope][2]},\n )\n\n return fig\n\n\n# ==============================================================================\n# Top N countries barchart\n# ==============================================================================\n\n\n@callback(\n Output(\"tab1-barchart\", \"figure\"),\n Input(\"tab1-energy-type-dropdown\", \"value\"),\n Input(\"tab1-year-slider\", \"value\"),\n Input(\"tab1-input-topN\", \"value\"),\n Input(\"tab1_top_bot\", \"value\"),\n)\ndef display_barchart(energy_type, year, topN, top_bot):\n \"\"\"\n Docs\n \"\"\"\n\n if top_bot == \"Top\":\n df_sorted = df_countries.query(\n \"Year==@year & energy_type==@energy_type\"\n ).sort_values([\"percentage\"], ascending=False)[:topN]\n\n elif top_bot == \"Bottom\":\n df_sorted = df_countries.query(\n \"Year==@year & energy_type==@energy_type\"\n ).sort_values([\"percentage\"], ascending=False)[-topN:]\n\n fig_bar = px.bar(\n df_sorted,\n x=\"percentage\",\n y=\"Entity\",\n color=\"percentage\",\n # title=\"Bar Graph\",\n hover_name=\"Entity\",\n hover_data={\n \"Year\": True,\n \"Fossil\": True,\n \"Nuclear\": True,\n \"Renewables\": True,\n \"percentage\": False,\n \"Entity\": False,\n },\n range_color=[0, 100],\n color_continuous_scale=px.colors.sequential.YlGn,\n range_x=[0, 105],\n text_auto=True,\n )\n\n fig_bar.update_layout(\n xaxis_title=\"Percentage %\",\n yaxis_title=\"Country\",\n legend_title=\"%\",\n )\n fig_bar.update_coloraxes(showscale=False)\n fig_bar.update_traces(textposition=\"outside\")\n\n if top_bot == \"Top\":\n fig_bar.update_layout(\n yaxis={\"categoryorder\": \"total ascending\"},\n 
title={\n \"text\": \"Top \"\n + str(topN)\n + \" \"\n + str(energy_type)\n + \" Energy Consumers in \"\n + str(year),\n \"x\": 0.5,\n \"xanchor\": \"center\",\n },\n )\n\n elif top_bot == \"Bottom\":\n fig_bar.update_layout(\n # yaxis={\"categoryorder\": \"total descending\"},\n title={\n \"text\": \"Bottom \"\n + str(topN)\n + \" \"\n + str(energy_type)\n + \" Energy Consumers in \"\n + str(year),\n \"x\": 0.5,\n \"xanchor\": \"center\",\n },\n )\n\n return fig_bar\n" ]
[ [ "pandas.read_csv" ] ]
Belvenix/IdleonCogOptimizer
[ "6b80b9f11bf0478e2e3522cb07b93b2c8834840b" ]
[ "src/python/board.py" ]
[ "from typing import Tuple\nfrom .cogs import Cog, EmptyCog, Player\nfrom .special_cogs import BoostedCog\nimport numpy as np\n\nclass Board:\n def __init__(self, height: int = 8, width: int = 12, locked: bool = True) -> None:\n self._visualization_board = ''\n self.board = np.array([[EmptyCog() for w in range(width)] for h in range(height)])\n if locked:\n self.mask = np.zeros_like(self.board)\n else:\n self.mask = np.ones_like(self.board)\n self.storage = []\n self.total_build = 0\n self.total_flaggy = 0\n self.total_exp = 0\n\n def unlock(self, mask: np.array):\n assert mask.shape == self.board.shape, \"Mask shape is different than board shape!\"\n self.mask = mask\n \n def empty(self) -> bool:\n for cog in self.board.flatten():\n if not isinstance(cog, EmptyCog):\n return False\n return True\n\n def place(self, x:int, y:int, cog: Cog = EmptyCog()) -> None:\n if self.validate(x, y):\n assert isinstance(cog, Cog), \"You can't place non-cogs on board!\"\n if not isinstance(self.board[y, x], EmptyCog):\n self.storage.append(self.board[y, x])\n self.board[y,x] = cog\n \n def clear(self):\n self.reset_board_values()\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n self.place(x, y, EmptyCog())\n\n def reset_board_values(self):\n self.total_build = 0\n self.total_flaggy = 0\n self.total_exp = 0\n\n def validate(self, x, y) -> bool:\n return (x >= 0 and y >= 0 and x < self.board.shape[1] and y < self.board.shape[0]) and (self.mask[y, x])\n\n def get_totals(self) -> Tuple[int, int, int]:\n return self.total_build, self.total_flaggy, self.total_exp\n\n def calculate_board(self):\n self.reset_loop()\n self.multiply_loop()\n self.sum_loop()\n\n def reset_loop(self):\n self.reset_board_values()\n for c in self.board.flatten():\n c.reset()\n\n def multiply_loop(self):\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n if self.validate(x, y):\n c = self.board[y, x]\n if isinstance(c, BoostedCog):\n 
boosted_coordinates, boosted_values = c.boosted()\n for bc in boosted_coordinates:\n dx, dy = bc[0], bc[1]\n \n if self.validate(x+dx, y+dy):\n boosted_cog = self.board[y+dy, x+dx]\n boosted_cog.apply_boost(*boosted_values)\n self.board[y+dy, x+dx] = boosted_cog\n \n def sum_loop(self):\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n if self.validate(x, y):\n c = self.board[y, x]\n self.total_build +=c.get_values()[0]\n self.total_flaggy += c.get_values()[1]\n self.total_exp += c.get_values()[2]\n\n def show(self):\n self.print_rates()\n self.print_board()\n self.print_storage()\n self.print_players_info()\n\n def print_rates(self):\n print(\"Total build rate: \" + str(self.total_build) + '\\n' +\n \"Total flaggy rate: \" + str(self.total_flaggy) + '\\n' +\n \"Total extra exp: \" + str(self.total_exp))\n\n def print_board(self):\n board_print = ''\n for y in range(self.board.shape[0]):\n for x in range(self.board.shape[1]):\n board_print += str(self.board[y, x]) + '\\t'\n board_print = board_print[:-1] + '\\n'\n self._visualization_board = board_print\n print(self._visualization_board)\n \n def print_storage(self):\n storage_print = 'In storage: '\n for s in self.storage:\n storage_print += str(s) + ', '\n print(storage_print)\n\n def print_players_info(self):\n print('Player stats:')\n for c in self.board.flatten():\n if isinstance(c, Player):\n print(c.info())" ]
[ [ "numpy.zeros_like", "numpy.ones_like" ] ]
anbhimi/Niffler
[ "81bf6c05132a58d05c7934f66edd0969c3bc9bf5" ]
[ "modules/png-extraction/ImageExtractor.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport glob \nfrom shutil import copyfile\nimport hashlib\nimport json\nimport sys\nimport subprocess\nimport logging\nfrom multiprocessing import Pool\nimport pdb\nimport time\nimport pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport pydicom as dicom \nimport png\n# pydicom imports needed to handle data errors\nfrom pydicom import config\nfrom pydicom import datadict\nfrom pydicom import values \n\nimport pathlib\nconfigs = {}\n\n\ndef initialize_config_and_execute(config_values):\n global configs\n configs = config_values\n # Applying checks for paths\n \n p1 = pathlib.PurePath(configs['DICOMHome'])\n dicom_home = p1.as_posix() # the folder containing your dicom files\n\n p2 = pathlib.PurePath(configs['OutputDirectory'])\n output_directory = p2.as_posix()\n\n print_images = bool(configs['PrintImages'])\n print_only_common_headers = bool(configs['CommonHeadersOnly'])\n depth = int(configs['Depth'])\n processes = int(configs['UseProcesses']) # how many processes to use.\n flattened_to_level = configs['FlattenedToLevel']\n email = configs['YourEmail']\n send_email = bool(configs['SendEmail'])\n no_splits = int(configs['SplitIntoChunks'])\n is16Bit = bool(configs['is16Bit']) \n \n metadata_col_freq_threshold = 0.1\n\n png_destination = output_directory + '/extracted-images/'\n failed = output_directory + '/failed-dicom/'\n maps_directory = output_directory + '/maps/'\n meta_directory = output_directory + '/meta/'\n\n LOG_FILENAME = output_directory + '/ImageExtractor.out'\n pickle_file = output_directory + '/ImageExtractor.pickle'\n\n # record the start time\n t_start = time.time()\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)\n\n if not os.path.exists(maps_directory):\n os.makedirs(maps_directory)\n\n if not os.path.exists(meta_directory):\n os.makedirs(meta_directory)\n\n if not 
os.path.exists(png_destination):\n os.makedirs(png_destination)\n\n if not os.path.exists(failed):\n os.makedirs(failed)\n\n if not os.path.exists(failed + \"/1\"):\n os.makedirs(failed + \"/1\")\n\n if not os.path.exists(failed + \"/2\"):\n os.makedirs(failed + \"/2\")\n\n if not os.path.exists(failed + \"/3\"):\n os.makedirs(failed + \"/3\")\n\n if not os.path.exists(failed + \"/4\"):\n os.makedirs(failed + \"/4\")\n\n logging.info(\"------- Values Initialization DONE -------\")\n final_res = execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start)\n return final_res\n\n\n# Function for getting tuple for field,val pairs\ndef get_tuples(plan, outlist = None, key = \"\"):\n if len(key)>0:\n key = key + \"_\"\n if not outlist:\n outlist = []\n for aa in plan.dir():\n try:\n hasattr(plan,aa)\n except TypeError as e:\n logging.warning('Type Error encountered')\n if hasattr(plan, aa) and aa!= 'PixelData':\n value = getattr(plan, aa)\n start = len(outlist)\n # if dicom sequence extract tags from each element\n if type(value) is dicom.sequence.Sequence:\n for nn, ss in enumerate(list(value)):\n newkey = \"_\".join([key,(\"%d\"%nn),aa]) if len(key) else \"_\".join([(\"%d\"%nn),aa])\n candidate = get_tuples(ss,outlist=None,key=newkey)\n # if extracted tuples are too big condense to a string\n if len(candidate)>2000:\n outlist.append((newkey,str(candidate)))\n else:\n outlist.extend(candidate)\n else:\n if type(value) is dicom.valuerep.DSfloat:\n value = float(value)\n elif type(value) is dicom.valuerep.IS:\n value = str(value)\n elif type(value) is dicom.valuerep.MultiValue:\n value = tuple(value)\n elif type(value) is dicom.uid.UID:\n value = str(value)\n outlist.append((key + aa, value))\n # appends name, value pair for this file. 
these are later concatenated to the dataframe\n return outlist\n\n\ndef extract_headers(f_list_elem):\n nn,ff = f_list_elem # unpack enumerated list\n plan = dicom.dcmread(ff, force=True) # reads in dicom file\n # checks if this file has an image\n c=True\n try:\n check = plan.pixel_array # throws error if dicom file has no image\n except:\n c = False\n kv = get_tuples(plan) # gets tuple for field,val pairs for this file. function defined above\n # dicom images should not have more than 300 dicom tags\n if len(kv)>500:\n logging.debug(str(len(kv)) + \" dicom tags produced by \" + ff)\n kv.append(('file', f_list_elem[1])) # adds my custom field with the original filepath\n kv.append(('has_pix_array',c)) # adds my custom field with if file has image\n if c:\n # adds my custom category field - useful if classifying images before processing\n kv.append(('category','uncategorized'))\n else:\n kv.append(('category','no image')) # adds my custom category field, makes note as imageless\n return dict(kv)\n\n\n# Function to extract pixel array information\n# takes an integer used to index into the global filedata dataframe\n# returns tuple of\n# filemapping: dicom to png paths (as str)\n# fail_path: dicom to failed folder (as tuple)\n# found_err: error code produced when processing\ndef extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit):\n ds = dicom.dcmread(filedata.iloc[i].loc['file'], force=True) # read file in\n found_err=None\n filemapping = \"\"\n fail_path = \"\"\n try:\n im = ds.pixel_array # pull image from read dicom\n imName=os.path.split(filedata.iloc[i].loc['file'])[1][:-4] # get file name ex: IM-0107-0022\n\n if flattened_to_level == 'patient':\n ID = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n folderName = hashlib.sha224(ID.encode('utf-8')).hexdigest()\n # check for existence of patient folder. 
Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n elif flattened_to_level == 'study':\n ID1 = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2 = filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n except:\n ID2='ALL-STUDIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n else:\n ID1=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2=filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n ID3=filedata.iloc[i].loc['SeriesInstanceUID'] # Unique identifier of the Series.\n except:\n ID2='ALL-STUDIES'\n ID3='ALL-SERIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID3.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. 
Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n\n\n pngfile = png_destination+folderName + '/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n dicom_path = filedata.iloc[i].loc['file']\n image_path = png_destination+folderName+'/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n if is16Bit:\n # write the PNG file as a 16-bit greyscale \n image_2d = ds.pixel_array.astype(np.double) \n # # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 65535.0 \n # # Convert to uint\n shape = ds.pixel_array.shape\n image_2d_scaled = np.uint16(image_2d_scaled) \n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True,bitdepth=16)\n w.write(png_file, image_2d_scaled)\n else: \n shape = ds.pixel_array.shape\n # Convert to float to avoid overflow or underflow losses.\n image_2d = ds.pixel_array.astype(float)\n # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 255.0\n # onvert to uint\n image_2d_scaled = np.uint8(image_2d_scaled)\n # Write the PNG file\n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True)\n w.write(png_file, image_2d_scaled)\n filemapping = filedata.iloc[i].loc['file'] + ', ' + pngfile + '\\n'\n except AttributeError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '1/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except ValueError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '2/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except BaseException as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '3/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except Exception as error:\n 
found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '4/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n return (filemapping, fail_path, found_err)\n\n\n# Function when pydicom fails to read a value attempt to read as other types.\ndef fix_mismatch_callback(raw_elem, **kwargs):\n try:\n if raw_elem.VR: \n values.convert_value(raw_elem.VR, raw_elem)\n except BaseException as err:\n for vr in kwargs['with_VRs']:\n try:\n values.convert_value(vr, raw_elem)\n except ValueError:\n pass\n except TypeError:\n continue\n else:\n raw_elem = raw_elem._replace(VR=vr)\n return raw_elem\n\n\ndef get_path(depth, dicom_home):\n directory = dicom_home + '/'\n i = 0\n while i < depth:\n directory += \"*/\"\n i += 1\n return directory + \"*.dcm\"\n\n \n# Function used by pydicom.\ndef fix_mismatch(with_VRs=['PN', 'DS', 'IS']):\n \"\"\"A callback function to check that RawDataElements are translatable\n with their provided VRs. If not, re-attempt translation using\n some other translators.\n Parameters\n ----------\n with_VRs : list, [['PN', 'DS', 'IS']]\n A list of VR strings to attempt if the raw data element value cannot\n be translated with the raw data element's VR.\n Returns\n -------\n No return value. 
The callback function will return either\n the original RawDataElement instance, or one with a fixed VR.\n \"\"\"\n dicom.config.data_element_callback = fix_mismatch_callback\n config.data_element_callback_kwargs = {\n 'with_VRs': with_VRs,\n } \n\n\ndef execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start):\n err = None\n fix_mismatch()\n if processes == 0.5: # use half the cores to avoid high ram usage\n core_count = int(os.cpu_count()/2)\n elif processes == 0: # use all the cores\n core_count = int(os.cpu_count())\n elif processes < os.cpu_count(): # use the specified number of cores to avoid high ram usage\n core_count = processes\n else:\n core_count = int(os.cpu_count())\n # get set up to create dataframe\n dirs = os.listdir(dicom_home)\n # gets all dicom files. 
if editing this code, get filelist into the format of a list of strings,\n # with each string as the file path to a different dicom file.\n file_path = get_path(depth, dicom_home)\n\n if os.path.isfile(pickle_file):\n f=open(pickle_file,'rb')\n filelist=pickle.load(f)\n else:\n filelist=glob.glob(file_path, recursive=True) # search the folders at the depth we request and finds all dicoms\n pickle.dump(filelist,open(pickle_file,'wb'))\n file_chunks = np.array_split(filelist,no_splits)\n logging.info('Number of dicom files: ' + str(len(filelist)))\n\n try:\n ff = filelist[0] # load first file as a template to look at all\n except IndexError:\n logging.error(\"There is no file present in the given folder in \" + file_path)\n sys.exit(1)\n\n plan = dicom.dcmread(ff, force=True)\n logging.debug('Loaded the first file successfully')\n\n keys = [(aa) for aa in plan.dir() if (hasattr(plan, aa) and aa != 'PixelData')]\n # checks for images in fields and prints where they are\n for field in plan.dir():\n if (hasattr(plan, field) and field!='PixelData'):\n entry = getattr(plan, field)\n if type(entry) is bytes:\n logging.debug(field)\n logging.debug(str(entry))\n\n for i,chunk in enumerate(file_chunks):\n csv_destination = \"{}/meta/metadata_{}.csv\".format(output_directory,i)\n mappings = \"{}/maps/mapping_{}.csv\".format(output_directory,i)\n fm = open(mappings, \"w+\")\n filemapping = 'Original DICOM file location, PNG location \\n'\n fm.write(filemapping)\n\n # add a check to see if the metadata has already been extracted\n # step through whole file list, read in file, append fields to future dataframe of all files\n\n headerlist = []\n # start up a multi processing pool\n # for every item in filelist send data to a subprocess and run extract_headers func\n # output is then added to headerlist as they are completed (no ordering is done)\n with Pool(core_count) as p:\n res= p.imap_unordered(extract_headers, enumerate(chunk))\n for i,e in enumerate(res):\n 
headerlist.append(e)\n data = pd.DataFrame(headerlist)\n logging.info('Chunk ' + str(i) + ' Number of fields per file : ' + str(len(data.columns)))\n # find common fields\n # make dataframe containing all fields and all files minus those removed in previous block\n # export csv file of final dataframe\n export_csv = data.to_csv(csv_destination, index = None, header=True)\n fields=data.keys()\n count = 0 # potential painpoint\n # writting of log handled by main process\n if print_images:\n logging.info(\"Start processing Images\")\n filedata = data\n total = len(chunk)\n stamp = time.time()\n for i in range(len(filedata)):\n (fmap,fail_path,err) = extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit)\n if err:\n count +=1\n copyfile(fail_path[0],fail_path[1])\n err_msg = str(count) + ' out of ' + str(len(chunk)) + ' dicom images have failed extraction'\n logging.error(err_msg)\n else:\n fm.write(fmap)\n fm.close()\n logging.info('Chunk run time: %s %s', time.time() - t_start, ' seconds!')\n\n logging.info('Generating final metadata file')\n\n col_names = dict()\n all_headers = dict()\n total_length = 0\n\n metas = glob.glob( \"{}*.csv\".format(meta_directory))\n # for each meta file identify the columns that are not na's for at least 10% (metadata_col_freq_threshold) of data\n for meta in metas:\n m = pd.read_csv(meta,dtype='str')\n d_len = m.shape[0]\n total_length += d_len\n\n for e in m.columns:\n col_pop = d_len - np.sum(m[e].isna()) # number of populated rows for this column in this metadata file\n\n if e in col_names:\n col_names[e] += col_pop\n else:\n col_names[e] = col_pop\n \n # all_headers keeps track of number of appearances of each header. 
We later use this count to ensure that\n # the headers we use are present in all metadata files.\n if e in all_headers:\n all_headers[e] += 1\n else:\n all_headers[e] = 1\n\n loadable_names = list()\n for k in col_names.keys():\n if k in all_headers and all_headers[k] >= no_splits: # no_splits == number of batches used \n if col_names[k] >= metadata_col_freq_threshold*total_length:\n loadable_names.append(k) # use header only if it's present in every metadata file\n \n # load every metadata file using only valid columns\n meta_list = list()\n for meta in metas:\n m = pd.read_csv(meta,dtype='str',usecols=loadable_names)\n meta_list.append(m)\n merged_meta = pd.concat(meta_list,ignore_index=True)\n merged_meta.to_csv('{}/metadata.csv'.format(output_directory),index=False)\n # getting a single mapping file\n logging.info('Generatign final mapping file')\n mappings = glob.glob(\"{}/maps/*.csv\".format(output_directory))\n map_list = list()\n for mapping in mappings:\n map_list.append(pd.read_csv(mapping,dtype='str'))\n merged_maps = pd.concat(map_list,ignore_index=True)\n if print_only_common_headers:\n mask_common_fields = merged_maps.isnull().mean() < 0.1\n common_fields = set(np.asarray(merged_maps.columns)[mask_common_fields])\n merged_maps = merged_maps[common_fields]\n merged_maps.to_csv('{}/mapping.csv'.format(output_directory),index=False)\n\n if send_email:\n subprocess.call('echo \"Niffler has successfully completed the png conversion\" | mail -s \"The image conversion'\n ' has been complete\" {0}'.format(email), shell=True)\n # Record the total run-time\n logging.info('Total run time: %s %s', time.time() - t_start, ' seconds!')\n logging.shutdown() # Closing logging file after extraction is done !!\n logs = []\n logs.append(err)\n logs.append(\"The PNG conversion is SUCCESSFUL\")\n return logs\n\n\nif __name__ == \"__main__\":\n with open('config.json', 'r') as f:\n niffler = json.load(f)\n\n # CLI Argument Parser\n ap = argparse.ArgumentParser()\n\n 
ap.add_argument(\"--DICOMHome\", default=niffler['DICOMHome'])\n ap.add_argument(\"--OutputDirectory\", default=niffler['OutputDirectory'])\n ap.add_argument(\"--Depth\", default=niffler['Depth'])\n ap.add_argument(\"--SplitIntoChunks\", default=niffler['SplitIntoChunks'])\n ap.add_argument(\"--PrintImages\", default=niffler['PrintImages'])\n ap.add_argument(\"--CommonHeadersOnly\", default=niffler['CommonHeadersOnly'])\n ap.add_argument(\"--UseProcesses\", default=niffler['UseProcesses'])\n ap.add_argument(\"--FlattenedToLevel\", default=niffler['FlattenedToLevel'])\n ap.add_argument(\"--is16Bit\", default=niffler['is16Bit'])\n ap.add_argument(\"--SendEmail\", default=niffler['SendEmail'])\n ap.add_argument(\"--YourEmail\", default=niffler['YourEmail'])\n\n args = vars(ap.parse_args())\n\n if len(args) > 0:\n initialize_config_and_execute(args)\n else:\n initialize_config_and_execute(niffler)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.uint16", "numpy.asarray", "numpy.array_split", "pandas.concat", "numpy.maximum", "numpy.uint8" ] ]
PuneethaPai/transformers
[ "a34a9896ac2a4a33ff9cd805c76eed914c8d8965" ]
[ "examples/text-classification/run_glue.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\n\nimport dataclasses\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Dict, Optional\n\nimport numpy as np\n\nfrom transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset\nfrom transformers import GlueDataTrainingArguments as DataTrainingArguments\nfrom transformers import (\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n glue_compute_metrics,\n glue_output_modes,\n glue_tasks_num_labels,\n set_seed,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n 
cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n try:\n num_labels = glue_tasks_num_labels[data_args.task_name]\n output_mode = glue_output_modes[data_args.task_name]\n except KeyError:\n raise ValueError(\"Task not found: %s\" % (data_args.task_name))\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n )\n model = AutoModelForSequenceClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # Get datasets\n train_dataset = GlueDataset(data_args, tokenizer=tokenizer) if training_args.do_train else None\n eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"dev\") if training_args.do_eval else None\n test_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"test\") if training_args.do_predict else None\n\n 
def compute_metrics(p: EvalPrediction) -> Dict:\n if output_mode == \"classification\":\n preds = np.argmax(p.predictions, axis=1)\n elif output_mode == \"regression\":\n preds = np.squeeze(p.predictions)\n return glue_compute_metrics(data_args.task_name, preds, p.label_ids)\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n trainer.train(\n model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None\n )\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n eval_results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_datasets = [eval_dataset]\n if data_args.task_name == \"mnli\":\n mnli_mm_data_args = dataclasses.replace(data_args, task_name=\"mnli-mm\")\n eval_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode=\"dev\"))\n\n for eval_dataset in eval_datasets:\n eval_result = trainer.evaluate(eval_dataset=eval_dataset)\n\n output_eval_file = os.path.join(\n training_args.output_dir, f\"eval_results_{eval_dataset.args.task_name}.txt\"\n )\n if trainer.is_world_master():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(eval_dataset.args.task_name))\n for key, value in eval_result.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n eval_results.update(eval_result)\n\n if training_args.do_predict:\n logging.info(\"*** Test ***\")\n test_datasets = [test_dataset]\n if data_args.task_name == \"mnli\":\n mnli_mm_data_args = 
dataclasses.replace(data_args, task_name=\"mnli-mm\")\n test_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode=\"test\"))\n\n for test_dataset in test_datasets:\n predictions = trainer.predict(test_dataset=test_dataset).predictions\n if output_mode == \"classification\":\n predictions = np.argmax(predictions, axis=1)\n\n output_test_file = os.path.join(\n training_args.output_dir, f\"test_results_{test_dataset.args.task_name}.txt\"\n )\n if trainer.is_world_master():\n with open(output_test_file, \"w\") as writer:\n logger.info(\"***** Test results {} *****\".format(test_dataset.args.task_name))\n writer.write(\"index\\tprediction\\n\")\n for index, item in enumerate(predictions):\n if output_mode == \"regression\":\n writer.write(\"%d\\t%3.3f\\n\" % (index, item))\n else:\n item = test_dataset.get_labels()[item]\n writer.write(\"%d\\t%s\\n\" % (index, item))\n return eval_results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.squeeze", "numpy.argmax" ] ]
voidrank/Geo-CNN
[ "4e8a7d5cc0d14ffa2a1b8bef854f294ae4e25f8b" ]
[ "train/test.py" ]
[ "''' Evaluating Frustum PointNets.\nWrite evaluation results to KITTI format labels.\nand [optionally] write results to pickle files.\n\nAuthor: Charles R. Qi\nDate: September 2017\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport argparse\nimport importlib\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\nfrom IPython import embed\nfrom model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER\nimport provider\nfrom train_util import get_batch\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\nparser.add_argument('--model', default='frustum_pointnets_v1', help='Model name [default: frustum_pointnets_v1]')\nparser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size for inference [default: 32]')\nparser.add_argument('--output', default='test_results', help='output file/folder name [default: test_results]')\nparser.add_argument('--data_path', default=None, help='frustum dataset pickle filepath [default: None]')\nparser.add_argument('--from_rgb_detection', action='store_true', help='test from dataset files from rgb detection.')\nparser.add_argument('--idx_path', default=None, help='filename of txt where each line is a data idx, used for rgb detection -- write <id>.txt for all frames. 
[default: None]')\nparser.add_argument('--dump_result', action='store_true', help='If true, also dump results to .pickle file')\nparser.add_argument('--vis', action='store_true', help='If true, save visualization')\nFLAGS = parser.parse_args()\n\n# Set training configurations\nBATCH_SIZE = FLAGS.batch_size\nMODEL_PATH = FLAGS.model_path\nGPU_INDEX = FLAGS.gpu\nNUM_POINT = FLAGS.num_point\nMODEL = importlib.import_module(FLAGS.model)\nNUM_CLASSES = 2\nNUM_CHANNEL = 4\n\n# Load Frustum Datasets.\nTEST_DATASET = provider.FrustumDataset(npoints=NUM_POINT, split='val',\n rotate_to_center=True, overwritten_data_path=FLAGS.data_path,\n from_rgb_detection=FLAGS.from_rgb_detection, one_hot=True)\n\ndef get_session_and_ops(batch_size, num_point):\n ''' Define model graph, load model parameters,\n create session and return session handle and tensors\n '''\n with tf.Graph().as_default():\n with tf.device('/gpu:'+str(GPU_INDEX)):\n pointclouds_pl, one_hot_vec_pl, labels_pl, centers_pl, \\\n heading_class_label_pl, heading_residual_label_pl, \\\n size_class_label_pl, size_residual_label_pl = \\\n MODEL.placeholder_inputs(batch_size, num_point)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n end_points = MODEL.get_model(pointclouds_pl, one_hot_vec_pl,\n is_training_pl)\n loss = MODEL.get_loss(labels_pl, centers_pl,\n heading_class_label_pl, heading_residual_label_pl,\n size_class_label_pl, size_residual_label_pl, end_points)\n saver = tf.train.Saver()\n\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n sess = tf.Session(config=config)\n\n # Restore variables from disk.\n saver.restore(sess, MODEL_PATH)\n ops = {'pointclouds_pl': pointclouds_pl,\n 'one_hot_vec_pl': one_hot_vec_pl,\n 'labels_pl': labels_pl,\n 'centers_pl': centers_pl,\n 'heading_class_label_pl': heading_class_label_pl,\n 'heading_residual_label_pl': heading_residual_label_pl,\n 'size_class_label_pl': size_class_label_pl,\n 
'size_residual_label_pl': size_residual_label_pl,\n 'is_training_pl': is_training_pl,\n 'logits': end_points['mask_logits'],\n 'center': end_points['center'],\n 'end_points': end_points,\n 'loss': loss,\n 'vis': end_points['vis']}\n return sess, ops\n\ndef softmax(x):\n ''' Numpy function for softmax'''\n shape = x.shape\n probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))\n probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)\n return probs\n\ndef inference(sess, ops, pc, one_hot_vec, batch_size):\n ''' Run inference for frustum pointnets in batch mode '''\n assert pc.shape[0]%batch_size == 0\n num_batches = pc.shape[0]//batch_size\n logits = np.zeros((pc.shape[0], pc.shape[1], NUM_CLASSES))\n centers = np.zeros((pc.shape[0], 3))\n heading_logits = np.zeros((pc.shape[0], NUM_HEADING_BIN))\n heading_residuals = np.zeros((pc.shape[0], NUM_HEADING_BIN))\n size_logits = np.zeros((pc.shape[0], NUM_SIZE_CLUSTER))\n size_residuals = np.zeros((pc.shape[0], NUM_SIZE_CLUSTER, 3))\n scores = np.zeros((pc.shape[0],)) # 3D box score`\n vis = np.zeros((pc.shape[0], pc.shape[1]))\n\n ep = ops['end_points']\n for i in range(num_batches):\n feed_dict = {\\\n ops['pointclouds_pl']: pc[i*batch_size:(i+1)*batch_size,...],\n ops['one_hot_vec_pl']: one_hot_vec[i*batch_size:(i+1)*batch_size,:],\n ops['is_training_pl']: False}\n\n batch_logits, batch_centers, \\\n batch_heading_scores, batch_heading_residuals, \\\n batch_size_scores, batch_size_residuals, batch_vis = \\\n sess.run([ops['logits'], ops['center'],\n ep['heading_scores'], ep['heading_residuals'],\n ep['size_scores'], ep['size_residuals'], ops['vis']],\n feed_dict=feed_dict)\n\n logits[i*batch_size:(i+1)*batch_size,...] = batch_logits\n centers[i*batch_size:(i+1)*batch_size,...] = batch_centers\n heading_logits[i*batch_size:(i+1)*batch_size,...] = batch_heading_scores\n heading_residuals[i*batch_size:(i+1)*batch_size,...] = batch_heading_residuals\n size_logits[i*batch_size:(i+1)*batch_size,...] 
= batch_size_scores\n size_residuals[i*batch_size:(i+1)*batch_size,...] = batch_size_residuals\n if FLAGS.vis:\n vis[i*batch_size:(i+1)*batch_size,...] = batch_vis[:,:,0].mean(axis=2)\n\n # Compute scores\n batch_seg_prob = softmax(batch_logits)[:,:,1] # BxN\n batch_seg_mask = np.argmax(batch_logits, 2) # BxN\n mask_mean_prob = np.sum(batch_seg_prob * batch_seg_mask, 1) # B,\n mask_mean_prob = mask_mean_prob / np.sum(batch_seg_mask,1) # B,\n heading_prob = np.max(softmax(batch_heading_scores),1) # B\n size_prob = np.max(softmax(batch_size_scores),1) # B,\n batch_scores = np.log(mask_mean_prob) + np.log(heading_prob) + np.log(size_prob)\n scores[i*batch_size:(i+1)*batch_size] = batch_scores \n # Finished computing scores\n\n heading_cls = np.argmax(heading_logits, 1) # B\n size_cls = np.argmax(size_logits, 1) # B\n heading_res = np.array([heading_residuals[i,heading_cls[i]] \\\n for i in range(pc.shape[0])])\n size_res = np.vstack([size_residuals[i,size_cls[i],:] \\\n for i in range(pc.shape[0])])\n\n return np.argmax(logits, 2), centers, heading_cls, heading_res, \\\n size_cls, size_res, scores, vis\n\ndef write_detection_results(result_dir, id_list, type_list, box2d_list, center_list, \\\n heading_cls_list, heading_res_list, \\\n size_cls_list, size_res_list, \\\n rot_angle_list, score_list):\n ''' Write frustum pointnets results to KITTI format label files. 
'''\n if result_dir is None: return\n results = {} # map from idx to list of strings, each string is a line (without \\n)\n for i in range(len(center_list)):\n idx = id_list[i]\n output_str = type_list[i] + \" -1 -1 -10 \"\n box2d = box2d_list[i]\n output_str += \"%f %f %f %f \" % (box2d[0],box2d[1],box2d[2],box2d[3])\n h,w,l,tx,ty,tz,ry = provider.from_prediction_to_label_format(center_list[i],\n heading_cls_list[i], heading_res_list[i],\n size_cls_list[i], size_res_list[i], rot_angle_list[i])\n score = score_list[i]\n output_str += \"%f %f %f %f %f %f %f %f\" % (h,w,l,tx,ty,tz,ry,score)\n if idx not in results: results[idx] = []\n results[idx].append(output_str)\n\n # Write TXT files\n if not os.path.exists(result_dir): os.mkdir(result_dir)\n output_dir = os.path.join(result_dir, 'data')\n if not os.path.exists(output_dir): os.mkdir(output_dir)\n for idx in results:\n pred_filename = os.path.join(output_dir, '%06d.txt'%(idx))\n fout = open(pred_filename, 'w')\n for line in results[idx]:\n fout.write(line+'\\n')\n fout.close() \n\ndef fill_files(output_dir, to_fill_filename_list):\n ''' Create empty files if not exist for the filelist. 
'''\n for filename in to_fill_filename_list:\n filepath = os.path.join(output_dir, filename)\n if not os.path.exists(filepath):\n fout = open(filepath, 'w')\n fout.close()\n\ndef test_from_rgb_detection(output_filename, result_dir=None):\n ''' Test frustum pointents with 2D boxes from a RGB detector.\n Write test results to KITTI format label files.\n todo (rqi): support variable number of points.\n '''\n ps_list = []\n segp_list = []\n center_list = []\n heading_cls_list = []\n heading_res_list = []\n size_cls_list = []\n size_res_list = []\n rot_angle_list = []\n score_list = []\n onehot_list = []\n vis_list = []\n\n test_idxs = np.arange(0, len(TEST_DATASET))\n print(len(TEST_DATASET))\n batch_size = BATCH_SIZE\n num_batches = int((len(TEST_DATASET)+batch_size-1)/batch_size)\n\n batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))\n batch_one_hot_to_feed = np.zeros((batch_size, 3))\n sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)\n for batch_idx in range(num_batches):\n print('batch idx: %d' % (batch_idx))\n start_idx = batch_idx * batch_size\n end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)\n cur_batch_size = end_idx - start_idx\n\n batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec = \\\n get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,\n NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)\n batch_data_to_feed[0:cur_batch_size,...] 
= batch_data\n batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec\n\n # Run one batch inference\n batch_output, batch_center_pred,\\\n batch_hclass_pred, batch_hres_pred, \\\n batch_sclass_pred, batch_sres_pred, batch_scores, batch_vis = \\\n inference(sess, ops, batch_data_to_feed,\n batch_one_hot_to_feed, batch_size=batch_size)\n\n for i in range(cur_batch_size):\n ps_list.append(batch_data[i,...])\n segp_list.append(batch_output[i,...])\n center_list.append(batch_center_pred[i,:])\n heading_cls_list.append(batch_hclass_pred[i])\n heading_res_list.append(batch_hres_pred[i])\n size_cls_list.append(batch_sclass_pred[i])\n size_res_list.append(batch_sres_pred[i,:])\n rot_angle_list.append(batch_rot_angle[i])\n #score_list.append(batch_scores[i])\n score_list.append(batch_rgb_prob[i]) # 2D RGB detection score\n onehot_list.append(batch_one_hot_vec[i])\n vis_list.append(batch_vis[i,:])\n\n if FLAGS.dump_result:\n with open(output_filename, 'wp') as fp:\n pickle.dump(ps_list, fp)\n pickle.dump(segp_list, fp)\n pickle.dump(center_list, fp)\n pickle.dump(heading_cls_list, fp)\n pickle.dump(heading_res_list, fp)\n pickle.dump(size_cls_list, fp)\n pickle.dump(size_res_list, fp)\n pickle.dump(rot_angle_list, fp)\n pickle.dump(score_list, fp)\n pickle.dump(onehot_list, fp)\n\n if FLAGS.vis:\n with open(output_filename, 'wb') as fp:\n pickle.dump(ps_list, fp)\n pickle.dump(vis_list, fp)\n\n # Write detection results for KITTI evaluation\n print('Number of point clouds: %d' % (len(ps_list)))\n write_detection_results(result_dir, TEST_DATASET.id_list,\n TEST_DATASET.type_list, TEST_DATASET.box2d_list,\n center_list, heading_cls_list, heading_res_list,\n size_cls_list, size_res_list, rot_angle_list, score_list)\n # Make sure for each frame (no matter if we have measurment for that frame),\n # there is a TXT file\n output_dir = os.path.join(result_dir, 'data')\n if FLAGS.idx_path is not None:\n to_fill_filename_list = [line.rstrip()+'.txt' \\\n for line in 
open(FLAGS.idx_path)]\n fill_files(output_dir, to_fill_filename_list)\n\ndef test(output_filename, result_dir=None):\n ''' Test frustum pointnets with GT 2D boxes.\n Write test results to KITTI format label files.\n todo (rqi): support variable number of points.\n '''\n ps_list = []\n seg_list = []\n segp_list = []\n center_list = []\n heading_cls_list = []\n heading_res_list = []\n size_cls_list = []\n size_res_list = []\n rot_angle_list = []\n score_list = []\n\n test_idxs = np.arange(0, len(TEST_DATASET))\n batch_size = BATCH_SIZE\n num_batches = len(TEST_DATASET)//batch_size\n\n sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)\n\n correct_cnt = 0\n for batch_idx in range(num_batches):\n print('batch idx: %d' % (batch_idx))\n start_idx = batch_idx * batch_size\n end_idx = (batch_idx+1) * batch_size\n\n batch_data, batch_label, batch_center, \\\n batch_hclass, batch_hres, batch_sclass, batch_sres, \\\n batch_rot_angle, batch_one_hot_vec = \\\n get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,\n NUM_POINT, NUM_CHANNEL)\n\n batch_output, batch_center_pred, \\\n batch_hclass_pred, batch_hres_pred, \\\n batch_sclass_pred, batch_sres_pred, batch_scores = \\\n inference(sess, ops, batch_data,\n batch_one_hot_vec, batch_size=batch_size)\n\n correct_cnt += np.sum(batch_output==batch_label)\n\t\n for i in range(batch_output.shape[0]):\n ps_list.append(batch_data[i,...])\n seg_list.append(batch_label[i,...])\n segp_list.append(batch_output[i,...])\n center_list.append(batch_center_pred[i,:])\n heading_cls_list.append(batch_hclass_pred[i])\n heading_res_list.append(batch_hres_pred[i])\n size_cls_list.append(batch_sclass_pred[i])\n size_res_list.append(batch_sres_pred[i,:])\n rot_angle_list.append(batch_rot_angle[i])\n score_list.append(batch_scores[i])\n\n print(\"Segmentation accuracy: %f\" % \\\n (correct_cnt / float(batch_size*num_batches*NUM_POINT)))\n\n if FLAGS.dump_result:\n with open(output_filename, 'wp') as fp:\n 
pickle.dump(ps_list, fp)\n pickle.dump(seg_list, fp)\n pickle.dump(segp_list, fp)\n pickle.dump(center_list, fp)\n pickle.dump(heading_cls_list, fp)\n pickle.dump(heading_res_list, fp)\n pickle.dump(size_cls_list, fp)\n pickle.dump(size_res_list, fp)\n pickle.dump(rot_angle_list, fp)\n pickle.dump(score_list, fp)\n\n # Write detection results for KITTI evaluation\n write_detection_results(result_dir, TEST_DATASET.id_list,\n TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,\n heading_cls_list, heading_res_list,\n size_cls_list, size_res_list, rot_angle_list, score_list)\n\n\nif __name__=='__main__':\n if FLAGS.from_rgb_detection:\n test_from_rgb_detection(FLAGS.output+'.pickle', FLAGS.output)\n else:\n test(FLAGS.output+'.pickle', FLAGS.output)\n" ]
[ [ "numpy.sum", "tensorflow.placeholder", "numpy.zeros", "numpy.argmax", "tensorflow.Graph", "numpy.log", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.ConfigProto" ] ]
rubind/wfc3_psf
[ "68e0d4b88e4a614939ae0c8771e37f315574ab82" ]
[ "wfc3_psf.py" ]
[ "import numpy as np\nfrom astropy.io import fits\nfrom scipy.interpolate import RectBivariateSpline\n\ndef index_PSF(PSF, x, y):\n return PSF[x+y*3]\n\ndef get_native_PSF(filt, x, y, the_path):\n x = float(np.clip(x, 0, 1014))\n y = float(np.clip(y, 0, 1014))\n\n f = fits.open(\"%sPSFSTD_WFC3IR_%s.fits\" % (the_path, filt))\n PSF = f[0].data\n f.close()\n\n if x < 507:\n sx = x/507.\n minx = 0\n else:\n sx = (x - 507.)/507.\n minx = 1\n \n if y < 507:\n sy = y/507.\n miny = 0\n else:\n sy = (y - 507.)/507.\n miny = 1\n\n out_PSF = 0.\n for dx in [0, 1]:\n for dy in [0, 1]:\n this_x = minx + dx\n this_y = miny + dy\n this_w = (sx*(dx == 1) + (1 - sx)*(dx == 0))*(sy*(dy == 1) + (1 - sy)*(dy == 0))\n print (\"x\", x, \"y\", y, \"this_x\", this_x, \"this_y\", this_y, \"this_w\", this_w)\n out_PSF += index_PSF(PSF, x = this_x, y = this_y)*this_w\n return out_PSF\n\ndef get_sampled_PSF(filt, x, y, subsample, the_path = \"./\"):\n native_PSF = get_native_PSF(filt, x, y, the_path)\n orig_sub = np.arange(len(native_PSF), dtype=np.float64)*0.25\n orig_sub -= np.median(orig_sub)\n\n ifn = RectBivariateSpline(orig_sub, orig_sub, native_PSF, kx = 3, ky = 3, s=0)\n new_sub = np.arange(len(native_PSF)*subsample/4., dtype=np.float64)/subsample\n new_sub -= np.median(new_sub)\n\n return ifn(new_sub, new_sub)\n\n" ]
[ [ "scipy.interpolate.RectBivariateSpline", "numpy.clip", "numpy.median" ] ]
CUrW-SL/DSS-Framework
[ "43a39b322ffb0eb92dd116e77cf9a8479357a121" ]
[ "accuracy_unit/wrf/wrf_accuracy.py" ]
[ "import math\nfrom datetime import datetime, timedelta\nimport sys\nfrom airflow.models import Variable\nimport pandas as pd\nimport numpy as np\n\nsys.path.insert(0, '/home/curw/git/DSS-Framework/db_util')\n# sys.path.insert(0, '/home/hasitha/PycharmProjects/DSS-Framework/db_util')\nfrom gen_db import CurwFcstAdapter, CurwObsAdapter, CurwSimAdapter\nfrom dss_db import RuleEngineAdapter\n\nCOMMON_DATE_TIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nSTATION_TYPE = 'CUrW_WeatherStation'\nMME_TAG = 'MDPA'\nVARIABLE_TYPE = 'rainfall'\nVARIABLE = 1\nUNIT = 1\nOBS_VARIABLE = 10\nOBS_UNIT = 9\nGFS_DAYS = 3\n\n\ndef get_curw_dss_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('db_config', deserialize_json=True)\n adapter = RuleEngineAdapter.get_instance(db_config)\n return adapter\n\n\ndef get_curw_fcst_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('fcst_db_config', deserialize_json=True)\n adapter = CurwFcstAdapter.get_instance(db_config)\n return adapter\n\n\ndef get_curw_obs_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('obs_db_config', deserialize_json=True)\n adapter = CurwObsAdapter.get_instance(db_config)\n return adapter\n\n\ndef get_curw_sim_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('sim_db_config', deserialize_json=True)\n adapter = CurwSimAdapter.get_instance(db_config)\n return adapter\n\n\ndef calculate_wrf_rule_accuracy(wrf_rule, exec_datetime):\n print('calculate_wrf_rule_accuracy|wrf_rule : ', wrf_rule)\n print('calculate_wrf_rule_accuracy|execution_date : ', exec_datetime)\n wrf_model = 'WRF_{}'.format(wrf_rule['model'])\n print('calculate_wrf_rule_accuracy|wrf_model : ', wrf_model)\n wrf_version = wrf_rule['version']\n wrf_run = wrf_rule['rule_info']['run']\n wrf_rule_id = wrf_rule['rule_info']['id']\n gfs_hour = wrf_rule['rule_info']['hour']\n accuracy_rule_id = wrf_rule['rule_info']['accuracy_rule']\n sim_tag = 'gfs_d{}_{}'.format(wrf_run, 
gfs_hour)\n print('calculate_wrf_rule_accuracy|sim_tag : ', sim_tag)\n dss_adapter = get_curw_dss_adapter()\n accuracy_rule = dss_adapter.get_accuracy_rule_info_by_id(accuracy_rule_id)\n print('calculate_wrf_rule_accuracy|accuracy_rule : ', accuracy_rule)\n obs_station_list = format_obs_station_list(accuracy_rule['observed_stations'], accuracy_rule['allowed_error'])\n success_count = 0\n if len(obs_station_list) > 0:\n for [obs_station, allowed_error] in obs_station_list:\n station_error = calculate_station_accuracy(obs_station, wrf_model, wrf_version, wrf_run, gfs_hour,\n exec_datetime, sim_tag)\n if station_error is not None:\n if station_error <= allowed_error:\n success_count + 1\n total_stations = len(obs_station_list)\n print('calculate_wrf_rule_accuracy|total_stations : ', total_stations)\n print('calculate_wrf_rule_accuracy|success_count : ', success_count)\n accuracy_percentage = (success_count / total_stations) * 100\n print('calculate_wrf_rule_accuracy|accuracy_percentage : ', total_stations)\n dss_adapter.update_wrf_rule_accuracy_level(accuracy_percentage, wrf_rule_id)\n print('wrf rule current accuracy successfully updated.')\n accuracy_rule = dss_adapter.get_accuracy_rule_info_by_id(accuracy_rule_id)\n expected_accuracy = float(accuracy_rule['rule_accuracy'])\n if accuracy_percentage >= expected_accuracy:\n return True\n else:\n return False\n\n\ndef calculate_station_accuracy(obs_station, wrf_model, wrf_version, wrf_run, gfs_hour,\n exec_datetime, sim_tag, method='MAD'):\n obs_adapter = get_curw_obs_adapter()\n obs_station_id = get_obs_station_id(obs_station, obs_adapter)\n [tms_start, tms_end] = get_wrf_ts_start_end(exec_datetime, wrf_run, gfs_hour)\n tms_start = tms_start.strftime('%Y-%m-%d %H:%M:%S')\n tms_end = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if obs_station_id is not None:\n obs_hash_id = get_obs_station_hash_id(obs_station_id, obs_adapter)\n obs_df = get_obs_tms(obs_hash_id, exec_datetime, tms_start, tms_end, obs_adapter)\n if 
obs_df is not None:\n sim_adapter = get_curw_sim_adapter()\n wrf_station_id = get_matching_wrf_station(obs_station, obs_station_id, sim_adapter)\n print('calculate_station_accuracy|wrf_station_id : ', wrf_station_id)\n if wrf_station_id is not None:\n fcst_adapter = get_curw_fcst_adapter()\n wrf_hash_id = get_wrf_station_hash_id(wrf_model, wrf_version, wrf_station_id, exec_datetime, sim_tag,\n fcst_adapter)\n print('calculate_station_accuracy|wrf_hash_id : ', wrf_hash_id)\n if wrf_hash_id is not None:\n fcst_df = get_fcst_tms(wrf_hash_id, exec_datetime, tms_start, tms_end, fcst_adapter)\n if fcst_df is not None:\n print('calculate_station_accuracy|obs_df : ', obs_df)\n print('calculate_station_accuracy|fcst_df : ', fcst_df)\n merged_df = obs_df.merge(fcst_df, how='left', on='time')\n merged_df['cumulative_observed'] = merged_df['observed'].cumsum()\n merged_df['cumulative_forecast'] = merged_df['forecast'].cumsum()\n print(merged_df)\n merged_df['cum_diff'] = merged_df[\"cumulative_observed\"] - merged_df[\"cumulative_forecast\"]\n row_count = len(merged_df.index)\n print('row_count : ', row_count)\n if method == 'MAD':\n print('MAD')\n merged_df['abs_cum_diff'] = merged_df['cum_diff'].abs()\n sum_abs_diff = merged_df['abs_diff'].sum()\n print('sum_abs_diff : ', sum_abs_diff)\n mean_absolute_deviation = sum_abs_diff / row_count\n print('mean_absolute_deviation : ', mean_absolute_deviation)\n return mean_absolute_deviation\n elif method == 'RMSE':\n print('RMSE')\n merged_df['diff_square'] = np.power((merged_df['cum_diff']), 2)\n root_mean_square_error = math.sqrt(merged_df['diff_square'].sum() / row_count)\n print('root_mean_square_error : ', root_mean_square_error)\n return root_mean_square_error\n else:\n print('Invalid method.')\n return None\n\n\ndef format_obs_station_list(obs_stations, allowed_error):\n station_list = obs_stations.split(\",\")\n print(station_list)\n formatted_list = []\n for station in station_list:\n station_val = station.split('-')\n if 
len(station_val) == 2:\n formatted_list.append([station_val[0], station_val[1]])\n else:\n formatted_list.append([station_val[0], allowed_error])\n print(formatted_list)\n return formatted_list\n\n\ndef get_obs_station_id(obs_station, obs_adapter=None):\n if obs_adapter is None:\n obs_adapter = get_curw_obs_adapter()\n station_id = obs_adapter.get_station_id_by_name(STATION_TYPE, obs_station)\n if station_id is not None:\n print('get_obs_station_id|station_id : ', station_id)\n return station_id\n\n\ndef get_obs_station_hash_id(obs_station_id, obs_adapter=None):\n if obs_adapter is None:\n obs_adapter = get_curw_obs_adapter()\n hash_id = obs_adapter.get_station_hash_id(obs_station_id, OBS_VARIABLE, OBS_UNIT)\n if hash_id is not None:\n print('get_obs_station_hash_id|hash_id : ', hash_id)\n return hash_id\n\n\ndef get_matching_wrf_station(obs_station, obs_station_id, sim_adapter=None):\n if obs_station_id is not None:\n grid_id = '{}_{}_{}_{}'.format(VARIABLE_TYPE, obs_station_id, obs_station, MME_TAG)\n print('get_matching_wrf_station|grid_id : ', grid_id)\n if sim_adapter is None:\n sim_adapter = get_curw_sim_adapter()\n wrf_station_id = sim_adapter.get_matching_wrf_station_by_grid_id(grid_id)\n if wrf_station_id is not None:\n print('get_matching_wrf_station|wrf_station_id : ', wrf_station_id)\n return wrf_station_id\n return None\n\n\ndef get_wrf_station_hash_id(wrf_model, wrf_version, wrf_station_id, exec_date, sim_tag, fcst_adapter=None):\n if fcst_adapter is None:\n fcst_adapter = get_curw_fcst_adapter()\n source_id = fcst_adapter.get_source_id(wrf_model, wrf_version)\n if source_id is not None:\n print('get_wrf_station_hash_id|source_id : ', source_id)\n hash_id = fcst_adapter.get_hash_id_of_station(VARIABLE, UNIT, source_id, wrf_station_id, sim_tag, exec_date)\n if hash_id is not None:\n print('get_wrf_station_hash_id|hash_id : ', hash_id)\n return hash_id\n\n\ndef get_wrf_ts_start_end(exec_datetime, wrf_run, gfs_hour):\n wrf_run = int(wrf_run)\n 
exec_datetime = datetime.strptime(exec_datetime, '%Y-%m-%d %H:%M:%S')\n print(exec_datetime)\n exec_date_str = exec_datetime.strftime('%Y-%m-%d')\n exec_date = datetime.strptime(exec_date_str, '%Y-%m-%d')\n print(exec_date)\n ts_start_date = exec_date - timedelta(days=wrf_run)\n ts_start_date_str = ts_start_date.strftime('%Y-%m-%d')\n print(ts_start_date_str)\n gfs_ts_start_utc_str = '{} {}:00:00'.format(ts_start_date_str, gfs_hour)\n print(gfs_ts_start_utc_str)\n gfs_ts_start_utc = datetime.strptime(gfs_ts_start_utc_str, '%Y-%m-%d %H:%M:%S')\n gfs_ts_start_local = gfs_ts_start_utc + timedelta(hours=5, minutes=30)\n gfs_ts_end_local = gfs_ts_start_local + timedelta(days=GFS_DAYS)\n return [gfs_ts_start_local, gfs_ts_end_local]\n\n\ndef get_fcst_tms(wrf_station_hash_id, exec_datetime, tms_start, tms_end, fcst_adapter=None):\n if fcst_adapter is None:\n fcst_adapter = get_curw_fcst_adapter()\n tms_df = fcst_adapter.get_station_tms(wrf_station_hash_id, exec_datetime, tms_start, tms_end)\n if tms_df is not None:\n return format_df_to_time_indexing(tms_df)\n\n\ndef format_df_to_time_indexing(tms_df):\n tms_df['time'] = pd.to_datetime(tms_df['time'], format=COMMON_DATE_TIME_FORMAT)\n print('format_df_to_time_indexing|tms_df : ', tms_df)\n tms_df.set_index('time', inplace=True)\n return tms_df\n\n\ndef get_obs_tms(obs_station_hash_id, exec_datetime, tms_start, tms_end, obs_adapter=None):\n if obs_adapter is None:\n obs_adapter = get_curw_obs_adapter()\n tms_df = obs_adapter.get_timeseries_by_id(obs_station_hash_id, tms_start, tms_end)\n if tms_df is not None:\n return format_df_to_15min_intervals(tms_df)\n\n\ndef format_df_to_15min_intervals(tms_df):\n tms_df = format_df_to_time_indexing(tms_df)\n min15_ts = pd.DataFrame()\n min15_ts['value'] = tms_df['value'].resample('15min', label='right', closed='right').sum()\n print(min15_ts)\n return min15_ts\n\n\nif __name__ == \"__main__\":\n # obs_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': 
'35.227.163.211',\n # 'mysql_db': 'curw_obs', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}\n # print(len(obs_db_config.keys()))\n # sim_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',\n # 'mysql_db': 'curw_sim', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}\n # fcst_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',\n # 'mysql_db': 'curw_fcst', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}\n # obs_adapter = get_curw_obs_adapter(obs_db_config)\n # sim_adapter = get_curw_sim_adapter(sim_db_config)\n # fcst_adapter = get_curw_fcst_adapter(fcst_db_config)\n # print(get_matching_wrf_station('Arangala', obs_adapter, sim_adapter))\n print(get_wrf_ts_start_end('2019-12-07 07:21:32', '2', '12'))\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "numpy.power" ] ]
zzzace2000/cairl_nodegam
[ "90d0d56a0e7be3d1cbba6179cbfc36d626456770" ]
[ "lib/mma.py" ]
[ "import logging\n\nimport cvxpy as cvx\nimport numpy as np\nfrom numpy.linalg import norm\nfrom tqdm import tqdm\n\n\nclass MaxMarginAbbeel(object):\n \"\"\"\n implementation of (Abbeel & Ng 2004)\n\n two versions: available\n\n 1. max-margin (stable, computationally more heavy)\n 2. projection (simpler)\n\n \"\"\"\n\n def __init__(self,\n pi_init,\n p,\n mu_expert,\n irl_precision,\n mdp_solver,\n mu_estimator,\n evaluators,\n method=\"max_margin\",\n slack_scale=0.01,\n use_slack=False,\n stochastic=True,\n delta=0.2\n ):\n \"\"\"TODO: to be defined1.\n\n Parameters\n ----------\n p : int\n dimension of phi\n mu_expert : target for feature expectation IRL\n mu_estimator : function\n estimate E[mu(s_0) | pi, D]\n evaluator : function\n evaluate i.t.o perf score and action matching\n irl_precision : convergence threshold\n use_slack : whether to use slack for convex optimization\n slack_scale : scaling term\n method: max_margin or projection\n \"\"\"\n self._pi_init = pi_init\n self._p = p\n self._mu_expert = mu_expert\n self._mu_estimator = mu_estimator\n self._irl_precision = irl_precision\n self._method = method\n self._evaluators = evaluators\n self._mdp_solver = mdp_solver\n self._use_slack = use_slack\n self._slack_scale = slack_scale\n self._stochastic = stochastic\n self._delta = delta\n\n def run(self, n_iteration):\n \"\"\"TODO: Docstring for something.\n\n Parameters\n ----------\n n_iteration : max iteration count\n\n Returns\n -------\n exp results\n \"\"\"\n mu_estimator = self._mu_estimator\n stochastic = self._stochastic\n\n pi_list = []\n pi_best_list = []\n mu_list = []\n mu_bar_list = []\n weight_list = []\n weight_best_list = []\n margin_v_list = []\n margin_mu_list = []\n\n pi_list.append(self._pi_init)\n\n mu_estimator.fit(self._pi_init, stochastic)\n mu_irl = mu_estimator.estimate()\n\n mu_list.append(mu_irl)\n mu_bar_list.append(mu_irl)\n\n weight_list.append(-1.0)\n margin_v_list.append(-1.0)\n margin_mu_list.append(-1.0)\n\n eval_metrics 
= {}\n\n # Evaluate the inital policy\n for e in self._evaluators:\n the_metrics = e.evaluate(self._pi_init)\n for k, v in the_metrics.items():\n if k not in eval_metrics:\n eval_metrics[k] = []\n eval_metrics['best_' + k] = []\n eval_metrics[k].append(v)\n\n for epi_i in tqdm(range(n_iteration)):\n if self._method == \"max_margin\":\n W, (margin_v, margin_mu, converged) = self._optimize(mu_list)\n elif self._method == \"projection\":\n W, (margin_v, margin_mu, converged, mu_bar_im1) = \\\n self._optimize_projection(mu_list, mu_bar_list)\n mu_bar_list.append(mu_bar_im1)\n else:\n raise Exception(\"Unknown IRL solver\")\n\n weight_list.append(W)\n margin_v_list.append(margin_v)\n margin_mu_list.append(margin_mu)\n logging.info(\"margin_v: {}\".format(margin_v))\n logging.info(\"margin_mu: {}\".format(margin_mu))\n margin_hyperplane = 2 / norm(W, 2)\n logging.info(\"margin_hyperplane: {}\".format(margin_hyperplane))\n\n if converged:\n logging.info(\"margin_mu converged after {} iterations\".format(epi_i + 1))\n break\n\n pi_irl = self._mdp_solver.solve(reward_fn=lambda obs_next: obs_next.dot(W))\n pi_list.append(pi_irl)\n\n mu_estimator.fit(pi_irl, stochastic)\n mu_irl = mu_estimator.estimate()\n\n mu_list.append(mu_irl)\n logging.info(\"mu_irl: {}\".format(mu_irl))\n\n mu_list_ = np.array([mu.flatten() for mu in mu_list])\n mixture_weight_list = self._choose_mixture_weight(mu_list_, self._mu_expert)\n logging.info(\"mixture_weight_list: {}\".format(mixture_weight_list))\n\n # pi_best = MixturePolicy(mixture_weight_list, pi_list)\n pi_best = 0\n for w, p in zip(mixture_weight_list, pi_list):\n pi_best += w * p\n pi_best_list.append(pi_best)\n\n best_mu = mixture_weight_list.T.dot(mu_list_)\n w_best = self._mu_expert - best_mu\n w_best /= norm(w_best, 2)\n weight_best_list.append(w_best)\n\n # Do the evaluations\n for e in self._evaluators:\n the_metrics = e.evaluate(pi_best)\n for k, v in the_metrics.items():\n eval_metrics['best_' + k].append(v)\n the_metrics = 
e.evaluate(pi_irl)\n for k, v in the_metrics.items():\n eval_metrics[k].append(v)\n logging.info(\"eval_metrics: {}\".format(eval_metrics))\n\n results = {\n \"margin_v\": margin_v_list,\n \"margin_mu\": margin_mu_list,\n \"mu\": mu_list,\n \"weight\": weight_list,\n \"policy\": pi_list,\n \"policy_best\": pi_best_list,\n \"weight_best\": weight_best_list,\n }\n return results, eval_metrics\n\n def _choose_mixture_weight(self, mu_list, mu_exp):\n \"\"\"\n implement the choice of policy in\n Section 3.0 in Abbeel, Ng (2004)\n\n Parameters\n ----------\n mu_list : TODO\n\n Returns\n -------\n pi_best\n\n \"\"\"\n lamda = cvx.Variable(len(mu_list))\n\n obj = cvx.Minimize(cvx.norm(mu_exp - mu_list.T @ lamda, p=2))\n constraints = [lamda >= 0, sum(lamda) == 1]\n\n prob = cvx.Problem(obj, constraints)\n prob.solve()\n\n if prob.status in [\"unbounded\", \"infeasible\"]:\n logging.warning(\"the optimization failed: {}\".format(prob.status))\n\n weight_list = np.array(lamda.value).flatten()\n tol = 1e-6\n weight_list[np.abs(weight_list) < tol] = 0.0\n weight_list /= np.sum(weight_list)\n return weight_list\n\n def _optimize(self, mu_list):\n \"\"\"linearly parametrize reward function.\n\n implements Eq. 
11 from Abbeel\n\n Parameters\n ----------\n W : weight\n\n Returns\n -------\n TODO\n - think whether to do s, a or just s\n\n \"\"\"\n logging.info(\"solving for W given mu_list\")\n # define variables\n W = cvx.Variable(self._p)\n t = cvx.Variable(1)\n\n if self._use_slack:\n xi = cvx.Variable(1)\n\n mu_exp = cvx.Parameter(self._p)\n mu_exp.value = self._mu_expert.flatten()\n\n if self._use_slack:\n C = cvx.Parameter(1)\n C.value = self._slack_scale\n obj = cvx.Maximize(t - C * xi)\n else:\n obj = cvx.Maximize(t)\n\n constraints = []\n\n for mu in mu_list:\n mu = mu.flatten()\n if self._use_slack:\n constraints += [W.T @ mu_exp + xi >= W.T @ mu + t]\n else:\n constraints += [W.T @ mu_exp >= W.T @ mu + t]\n constraints += [cvx.norm(W, 2) <= 1]\n\n prob = cvx.Problem(obj, constraints)\n prob.solve()\n\n if prob.status in [\"unbounded\", \"infeasible\"]:\n logging.warning(\"the optimization failed: {}\".format(prob.status))\n\n W = np.array(W.value)\n margin_v = t.value\n\n mu_list = np.array([mu.flatten() for mu in mu_list])\n margin_mu_list = norm(np.array(mu_exp.value).T - mu_list, 2, axis=1)\n margin_mu = np.min(margin_mu_list)\n\n converged = margin_mu <= self._irl_precision\n return W, (margin_v, margin_mu, converged)\n\n def _optimize_projection(self, mu_list, mu_bar_list):\n \"\"\"linearly parametrize reward function.\n\n implements Sec. 
3.1 from Abbeel, Ng (2004)\n\n Parameters\n ----------\n W : weight\n\n Returns\n -------\n TODO\n - think whether to do s, a or just s\n\n \"\"\"\n mu_e = self._mu_expert\n mu_im1 = mu_list[-1]\n mu_bar_im2 = mu_bar_list[-1]\n\n if len(mu_bar_list) == 1:\n mu_bar_im1 = mu_list[-1]\n w_i = mu_e - mu_im1\n else:\n a = mu_im1 - mu_bar_im2\n b = mu_e - mu_bar_im2\n mu_bar_im1 = (mu_bar_im2 + a.T.dot(b) / norm(a)**2) * a\n w_i = mu_e - mu_bar_im1\n\n w_i /= np.linalg.norm(w_i, 2)\n t_i = np.linalg.norm(w_i, 2)\n\n margin_v = w_i.T.dot(mu_e - mu_bar_im1)\n margin_mu = t_i\n\n converged = margin_mu <= self._irl_precision\n return w_i, (margin_v, margin_mu, converged, mu_bar_im1)\n\n#\n# def train_mma(pi_0, phi_sa_dim, task_desc, params, D, evaluator, ob_space=None, ac_space=None):\n# gym.logger.setLevel(logging.WARN)\n#\n# gamma = task_desc[\"gamma\"]\n# horizon = task_desc[\"horizon\"]\n# eps = params[\"eps\"]\n# p = q = phi_sa_dim # adding action dim\n# phi = D[\"phi_fn\"]\n# phi_s = D[\"phi_fn_s\"]\n# stochastic = True\n# mu_estimator_type = params[\"mu_estimator\"]\n# n_action = task_desc[\"n_action\"]\n# assert isinstance(n_action, int)\n# action_list = range(n_action)\n# precision = params[\"precision\"]\n#\n# mu_exp_estimator = EmpiricalMuEstimator(phi, gamma)\n# mu_exp_estimator.fit(D, stochastic, return_s_init=True)\n# mu_exp, s_init_list = mu_exp_estimator.estimate()\n#\n#\n# logging.info(\"fitting {}\".format(mu_estimator_type))\n# if task_desc[\"type\"] == \"gym\":\n# env = gym.make(task_desc[\"env_id\"])\n# ac_space = env.action_space\n# ob_space = env.observation_space\n# mu_dim = p # only for discrete action\n# elif task_desc[\"type\"] == \"sepsis\":\n# if ac_space is None:\n# ac_space = (5, )\n# if ob_space is None:\n# ob_space = (46, )\n# mu_dim = p\n#\n# stochastic = True\n#\n# s = D[\"s\"]\n# a = D[\"a\"]\n# if len(a.shape) == 1:\n# a = np.expand_dims(a, axis=1)\n# s_next = D[\"s_next\"]\n# done = D[\"done\"]\n# if len(done.shape) == 1:\n# done = 
np.expand_dims(done, axis=1)\n# phi_sa = D[\"phi_sa\"]\n#\n# n_transition = D[\"s\"].shape[0]\n# idx = idx = int(n_transition * 0.7)\n#\n# D_train = {\"s\" : s[:idx, :],\n# \"a\" : a[:idx, :],\n# \"phi_sa\" : phi_sa[:idx, :],\n# \"s_next\": s_next[:idx, :],\n# \"done\": done[:idx, :]}\n#\n# D_val = {\"s\" : s[idx:, :],\n# \"a\" : a[idx:, :],\n# \"phi_sa\" : phi_sa[idx:, :],\n# \"s_next\": s_next[idx:, :],\n# \"done\": done[idx:, :]}\n#\n#\n# if mu_estimator_type == \"lstd\":\n# mu_estimator = LSTDMuEstimator(phi, gamma, D, p, q, eps, s_init_list)\n# elif mu_estimator_type == \"dsfn\":\n# mu_estimator = DeepMuEstimator(phi, gamma, D_train, D_val, s_init_list, ob_space,\n# ac_space, mu_dim, horizon)\n# else:\n# raise NotImplementedError\n#\n# mdp_solver = DQNSepsis(D=D_train)\n#\n# mma = MaxMarginAbbeel(pi_init=pi_0,\n# p=p,\n# phi=phi,\n# mu_exp=mu_exp,\n# mdp_solver=mdp_solver,\n# evaluator=evaluator,\n# irl_precision=params[\"precision\"],\n# method=params[\"method\"],\n# mu_estimator=mu_estimator,\n# stochastic=stochastic,\n# D_val=D_val)\n#\n# results = mma.run(n_iteration=params[\"n_iteration\"])\n# return results\n\n\n" ]
[ [ "numpy.sum", "numpy.abs", "numpy.min", "numpy.array", "numpy.linalg.norm" ] ]
HackaGeo2021UFF/well-tie-challenge
[ "63af3d9b4cc63e78f9ec31ee3d0e2b231e65b195" ]
[ "src/wellTie.py" ]
[ "import json\nimport welly\nfrom welly import Well\nimport pandas as pd\nimport lasio\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nfrom src.waveletChoice import *\nfrom src.seismicManipulation import *\n\ndef read_inputs(jpath):\n \"\"\"\n read_inputs reads the input json file and stores it information in a dictionary\n\n Parameters\n ----------\n jpath : string\n the input JSON file\n\n Returns\n -------\n paths: dict\n Returns a dictionary of the json file\n\n \"\"\"\n with open(jpath) as file:\n paths = json.load(file)\n return paths\n\ndef read_data(ui):\n \"\"\"\n read_data reads the input data and stores it in a dictionary\n\n Parameters\n ----------\n ui: dict\n A dictionary of the user inputs\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n \n # read well .las\n well = Well.from_las(ui['well'])\n ui['uwi'] = well.header['uwi']\n ui['well_name'] = well.header['name']\n\n # read cube seismic\n\n # dado do desafio, usar somente no ambiente remoto\n tr_seis, t_seis = seismic_trace = extract_seismic_trace(ui['well'], ui['seismic'])\n t_seis = t_seis/1e3\n\n # dado de exemplo, pode usar na máquina pessoal\n #df = pd.read_csv(ui['seismic'])\n #tr_seis, t_seis = df.cdp409.to_numpy() , df.time.to_numpy()\n \n seismic = pd.DataFrame({'t':t_seis, 'tr_synth':np.zeros(len(tr_seis)), 'tr_seis':tr_seis})\n\n # read wavelet\n if ui['wavelet'] == \"\":\n wavelet = None\n else: \n # wavelet = pd.read.csv(ui['wavelet']) \n wavelet = None\n\n data = {'well':well,'seismic':seismic, 'wavelet':wavelet}\n return data\n\ndef pre_processing_data(data):\n \"\"\"\n pre_processing_data pre-process the well DT and RHOB data with operations as:\n * despike\n * smooth\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used 
throughout the code\n\n \"\"\"\n \n data['well'].data['DT'] = np.nan_to_num(data['well'].data['DT'])\n data['well'].data['RHOB'] = np.nan_to_num(data['well'].data['RHOB-EDIT'])\n #data['well'].data['RHOB'] = np.nan_to_num(data['well'].data['RHOB'])\n\n\n #unit convert to µs/m\n data['well'].data['DT'] = data['well'].data['DT'] / 0.3048 \n #unit convert to kg/m3 \n #data['well'].data['RHOB'] = data['well'].data['RHOB-EDIT'] * 1000\n data['well'].data['RHOB'] = data['well'].data['RHOB'] * 1000\n\n #Despiking\n #Sonic Despiking\n dt = data['well'].data['DT']\n data['well'].data['DT_DS'] = dt.despike(window_length=50, z=2)\n\n #Density Despiking\n den = data['well'].data['RHOB']\n data['well'].data['RHOB_DS'] = den.despike(window_length=50, z=2)\n\n #Smoothing \n #Sonic Smoothing\n dt_ds = data['well'].data['DT_DS']\n data['well'].data['DT_DS_SM'] = dt_ds.smooth(window_length=10, samples=False)\n\n #Density Smoothing\n den_ds = data['well'].data['RHOB_DS']\n data['well'].data['RHOB_DS_SM'] = den_ds.smooth(window_length=10, samples=False)\n data['well'] = data['well'].df()\n return data\n\ndef time_depth_relationship(data, ui):\n \"\"\"\n time_depth_relationship creates the time-depth relationship from the sonic (DT) log\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n ui: dict\n Returns a dictionary of the user inputs\n\n \"\"\"\n ### just an exemple\n ### TO DO: become smart\n log_start = data['well'].index[0] # Depth of logging starts(m) from header\n kb = ui['kb'] # Kelly Bushing elevation(m) from header\n gap_int = log_start - kb\n v_water = 1500\n t_water_botton = ui['t_water_botton']\n log_start_time = t_water_botton + 2*(log_start - v_water*t_water_botton/2)*(np.array(data['well']['DT'])[0]/1e6) \n\n #first replace NaN values with zero\n dt = data['well']['DT']\n dt_iterval = dt * 0.1524 / 1e6\n t_cum = np.cumsum(dt_iterval) * 2\n data['well']['TWT'] = t_cum + 
log_start_time\n return data\n\ndef ai(data):\n \"\"\"\n ai creates the accoustic impedance log\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n # Sonic velocity calculate\n Vsonic = []\n for value in data['well']['DT_DS_SM']:\n if value == 0:\n Vsonic.append(0)\n else:\n Vsonic.append(1e6/value)\n\n data['well']['Vsonic'] = np.array(Vsonic) #(unit: m/s)\n # AI calculate\n data['well']['AI'] = data['well']['Vsonic'] * data['well']['RHOB_DS_SM'] #(unit: kg/m2.s)\n return data\n\ndef rc_time(data):\n \"\"\"\n rc_time creates the Reflectivity Coefficients log in the time-domain\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n AI_tdom = np.interp(x=data['seismic']['t'].to_numpy(), xp = data['well'].TWT.to_numpy(), fp = data['well'].AI.to_numpy()) #resampling\n\n # again Rc calulation but in reampled time domain\n Rc_tdom = np.zeros(len(AI_tdom))\n for i in range(len(AI_tdom)-1):\n dem = AI_tdom[i]+AI_tdom[i+1]\n if dem == 0:\n Rc_tdom[i] = 0\n else:\n Rc_tdom[i] = (AI_tdom[i+1]-AI_tdom[i])/dem\n # to adjust vector size copy the last element to the tail\n Rc_tdom[-1] = Rc_tdom[-2]\n \n i = 0\n while Rc_tdom[i] == 0 and i < len(Rc_tdom):\n i += 1\n Rc_tdom[i] = Rc_tdom[i+1]\n\n i = len(Rc_tdom)-1\n while Rc_tdom[i] == 0 and i > 0:\n i -= 1\n Rc_tdom[i] = Rc_tdom[i-1]\n \n data['well_tdom'] = pd.DataFrame()\n data['well_tdom']['t'] = data['seismic']['t']\n data['well_tdom']['Rc_tdom'] = Rc_tdom\n data['well_tdom']['AI_tdom'] = AI_tdom\n\n return data\n\ndef synthetic_seismogram(data):\n \"\"\"\n synthetic_seismogram creates the synthetic seismogram\n\n Parameters\n 
----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n\n if data['wavelet'] == None:\n cc, freq, roll, phase = best_wavelet(data)\n w = ricker(freq, phase, data)\n else:\n w = data['wavelet']\n \n Rc_tdom = np.roll(data['well_tdom']['Rc_tdom'], roll)\n data['seismic']['tr_synth'] = np.convolve(w, Rc_tdom, mode='same')\n return data\n\ndef normalization(data):\n \"\"\"\n normalization normalizes the synthetic and seismic signals\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n data['seismic']['tr_synth'] = data['seismic']['tr_synth']/np.max(data['seismic']['tr_synth'])\n data['seismic']['tr_seis'] = data['seismic']['tr_seis']/np.max(data['seismic']['tr_seis'])\n return data\n\ndef export_data(data, ui):\n \"\"\"\n export_data exports data in the Decision Workspace format\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n ui: dict\n A dictionary of the user inputs \n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n \n if 'outputs' not in os.listdir():\n os.mkdir('outputs')\n\n result_path = ui['well_name'].strip().replace(\"/\",\"_\")\n\n twt = data['well']['TWT'].to_numpy()*1000\n twt = np.insert(twt, 0, 0)\n depth = data['well'].index.to_numpy()\n depth = np.insert(depth, 0, 0)\n\n t = data['seismic']['t'].to_numpy()*1000\n new_depth = np.interp(t,twt,depth)\n amp = data['seismic']['tr_synth'].to_numpy()\n\n with open('outputs/'+result_path+'_TD.dat','w') as file:\n file.write('TDP1 '+ ui['uwi'] + '\\n')\n file.write('TDP2 ' + ui['well_name'] 
+ '\\n')\n \n line = 'TDP3 ' + ui['td_name'] + ' '*70\n line = line[:73]\n line += ' 0 TVDBTDD\\n'\n file.write(line)\n \n n = len(t)\n for i in range(n):\n line = 'TDP5 %.6f '%t[i]\n line = line[:21] \n line += '%.5f\\n'%new_depth[i]\n file.write(line)\n \n with open('outputs/'+result_path+'_synth.dat','w') as file:\n file.write('SYN1 '+ ui['uwi'] + '\\n')\n file.write('SYN2 ' + ui['well_name'] + '\\n')\n\n line = 'SYN3 ' + ui['synth_name'] + ' '*30\n line = line[:70]\n line += '4.0\\n'\n file.write(line)\n \n \n n = len(t)\n for i in range(n):\n line = 'SYN7 %.6f '%t[i]\n line = line[:28] \n line += '%.6f\\n'%amp[i]\n file.write(line)\n\n return None\n" ]
[ [ "numpy.roll", "numpy.cumsum", "numpy.interp", "pandas.DataFrame", "numpy.insert", "numpy.max", "numpy.array", "numpy.convolve", "numpy.nan_to_num" ] ]
Aniruddha-Tapas/seq2seq
[ "1592b842b652ae648b96c164bead38eb089ce08e" ]
[ "seq2seq/contrib/seq2seq/helper.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nIMPORTANT: This code is taken directly from Tensorflow\n(https://github.com/tensorflow/tensorflow) and is copied temporarily\nuntil it is available in a packaged Tensorflow version on pypi.\n\nTODO(dennybritz): Delete this code when it becomes available in TF.\n\nA library of helpers for use with SamplingDecoders.\n\"\"\"\n\n# pylint: skip-file\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.contrib.distributions.python.ops import categorical\n# from tensorflow.contrib.seq2seq.python.ops import decoder\nfrom seq2seq.contrib.seq2seq import decoder\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\n\n__all__ = [\n \"Helper\",\n \"TrainingHelper\",\n \"GreedyEmbeddingHelper\",\n \"CustomHelper\",\n \"ScheduledEmbeddingTrainingHelper\",\n]\n\n_transpose_batch_time = 
decoder._transpose_batch_time # pylint: disable=protected-access\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Helper(object):\n \"\"\"Helper interface. Helper instances are used by SamplingDecoder.\"\"\"\n\n @abc.abstractproperty\n def batch_size(self):\n \"\"\"Returns a scalar int32 tensor.\"\"\"\n raise NotImplementedError(\"batch_size has not been implemented\")\n\n @abc.abstractmethod\n def initialize(self, name=None):\n \"\"\"Returns `(initial_finished, initial_inputs)`.\"\"\"\n pass\n\n @abc.abstractmethod\n def sample(self, time, outputs, state, name=None):\n \"\"\"Returns `sample_ids`.\"\"\"\n pass\n\n @abc.abstractmethod\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n \"\"\"Returns `(finished, next_inputs, next_state)`.\"\"\"\n pass\n\n\nclass CustomHelper(Helper):\n \"\"\"Base abstract class that allows the user to customize sampling.\"\"\"\n\n def __init__(self, initialize_fn, sample_fn, next_inputs_fn):\n \"\"\"Initializer.\n\n Args:\n initialize_fn: callable that returns `(finished, next_inputs)`\n for the first iteration.\n sample_fn: callable that takes `(time, outputs, state)`\n and emits tensor `sample_ids`.\n next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`\n and emits `(finished, next_inputs, next_state)`.\n \"\"\"\n self._initialize_fn = initialize_fn\n self._sample_fn = sample_fn\n self._next_inputs_fn = next_inputs_fn\n self._batch_size = None\n\n @property\n def batch_size(self):\n if self._batch_size is None:\n raise ValueError(\"batch_size accessed before initialize was called\")\n return self._batch_size\n\n def initialize(self, name=None):\n with ops.name_scope(name, \"%sInitialize\" % type(self).__name__):\n (finished, next_inputs) = self._initialize_fn()\n if self._batch_size is None:\n self._batch_size = array_ops.size(finished)\n return (finished, next_inputs)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(name, \"%sSample\" % type(self).__name__,\n 
(time, outputs, state)):\n return self._sample_fn(time=time, outputs=outputs, state=state)\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(name, \"%sNextInputs\" % type(self).__name__,\n (time, outputs, state)):\n return self._next_inputs_fn(\n time=time, outputs=outputs, state=state, sample_ids=sample_ids)\n\n\nclass TrainingHelper(Helper):\n \"\"\"A helper for use during training. Only reads inputs.\n\n Returned sample_ids are the argmax of the RNN output logits.\n \"\"\"\n\n def __init__(self, inputs, sequence_length, time_major=False, name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure of) input tensors.\n sequence_length: An int32 vector tensor.\n time_major: Python bool. Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sequence_length` is not a 1D tensor.\n \"\"\"\n with ops.name_scope(name, \"TrainingHelper\", [inputs, sequence_length]):\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\n if not time_major:\n inputs = nest.map_structure(_transpose_batch_time, inputs)\n\n def _unstack_ta(inp):\n return tensor_array_ops.TensorArray(\n dtype=inp.dtype,\n size=array_ops.shape(inp)[0],\n element_shape=inp.get_shape()[1:]).unstack(inp)\n\n self._input_tas = nest.map_structure(_unstack_ta, inputs)\n self._sequence_length = ops.convert_to_tensor(\n sequence_length, name=\"sequence_length\")\n if self._sequence_length.get_shape().ndims != 1:\n raise ValueError(\n \"Expected sequence_length to be a vector, but received shape: %s\" %\n self._sequence_length.get_shape())\n\n self._zero_inputs = nest.map_structure(\n lambda inp: array_ops.zeros_like(inp[0, :]), inputs)\n\n self._batch_size = array_ops.size(sequence_length)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n def initialize(self, name=None):\n with ops.name_scope(name, 
\"TrainingHelperInitialize\"):\n finished = math_ops.equal(0, self._sequence_length)\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: self._zero_inputs,\n lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))\n return (finished, next_inputs)\n\n def sample(self, time, outputs, name=None, **unused_kwargs):\n with ops.name_scope(name, \"TrainingHelperSample\", [time, outputs]):\n sample_ids = math_ops.cast(\n math_ops.argmax(\n outputs, axis=-1), dtypes.int32)\n return sample_ids\n\n def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):\n \"\"\"next_inputs_fn for TrainingHelper.\"\"\"\n with ops.name_scope(name, \"TrainingHelperNextInputs\",\n [time, outputs, state]):\n next_time = time + 1\n finished = (next_time >= self._sequence_length)\n all_finished = math_ops.reduce_all(finished)\n\n def read_from_ta(inp):\n return inp.read(next_time)\n\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: self._zero_inputs,\n lambda: nest.map_structure(read_from_ta, self._input_tas))\n return (finished, next_inputs, state)\n\n\nclass ScheduledEmbeddingTrainingHelper(TrainingHelper):\n \"\"\"A training helper that adds scheduled sampling.\n\n Returns -1s for sample_ids where no sampling took place; valid sample id\n values elsewhere.\n \"\"\"\n\n def __init__(self,\n inputs,\n sequence_length,\n embedding,\n sampling_probability,\n time_major=False,\n seed=None,\n scheduling_seed=None,\n name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure of) input tensors.\n sequence_length: An int32 vector tensor.\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`.\n sampling_probability: A 0D `float32` tensor: the probability of sampling\n categorically from the output ids instead of reading directly from the\n inputs.\n time_major: Python bool. 
Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n seed: The sampling seed.\n scheduling_seed: The schedule decision rule sampling seed.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sampling_probability` is not a scalar or vector.\n \"\"\"\n with ops.name_scope(name, \"ScheduledEmbeddingSamplingWrapper\",\n [embedding, sampling_probability]):\n if callable(embedding):\n self._embedding_fn = embedding\n else:\n self._embedding_fn = (\n lambda ids: embedding_ops.embedding_lookup(embedding, ids))\n self._sampling_probability = ops.convert_to_tensor(\n sampling_probability, name=\"sampling_probability\")\n if self._sampling_probability.get_shape().ndims not in (0, 1):\n raise ValueError(\n \"sampling_probability must be either a scalar or a vector. \"\n \"saw shape: %s\" % (self._sampling_probability.get_shape()))\n self._seed = seed\n self._scheduling_seed = scheduling_seed\n super(ScheduledEmbeddingTrainingHelper, self).__init__(\n inputs=inputs,\n sequence_length=sequence_length,\n time_major=time_major,\n name=name)\n\n def initialize(self, name=None):\n return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperSample\",\n [time, outputs, state]):\n # Return -1s where we did not sample, and sample_ids elsewhere\n select_sample_noise = random_ops.random_uniform(\n [self.batch_size], seed=self._scheduling_seed)\n select_sample = (self._sampling_probability > select_sample_noise)\n sample_id_sampler = categorical.Categorical(logits=outputs)\n return array_ops.where(\n select_sample,\n sample_id_sampler.sample(seed=self._seed),\n array_ops.tile([-1], [self.batch_size]))\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperSample\",\n [time, outputs, state, 
sample_ids]):\n (finished, base_next_inputs, state) = (\n super(ScheduledEmbeddingTrainingHelper, self).next_inputs(\n time=time,\n outputs=outputs,\n state=state,\n sample_ids=sample_ids,\n name=name))\n\n def maybe_sample():\n \"\"\"Perform scheduled sampling.\"\"\"\n where_sampling = math_ops.cast(\n array_ops.where(sample_ids > -1), dtypes.int32)\n where_not_sampling = math_ops.cast(\n array_ops.where(sample_ids <= -1), dtypes.int32)\n where_sampling_flat = array_ops.reshape(where_sampling, [-1])\n where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])\n sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)\n inputs_not_sampling = array_ops.gather(base_next_inputs,\n where_not_sampling_flat)\n sampled_next_inputs = self._embedding_fn(sample_ids_sampling)\n base_shape = array_ops.shape(base_next_inputs)\n return (array_ops.scatter_nd(\n indices=where_sampling,\n updates=sampled_next_inputs,\n shape=base_shape) + array_ops.scatter_nd(\n indices=where_not_sampling,\n updates=inputs_not_sampling,\n shape=base_shape))\n\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: base_next_inputs, maybe_sample)\n return (finished, next_inputs, state)\n\n\nclass GreedyEmbeddingHelper(Helper):\n \"\"\"A helper for use during inference.\n\n Uses the argmax of the output (treated as logits) and passes the\n result through an embedding layer to get the next input.\n \"\"\"\n\n def __init__(self, embedding, start_tokens, end_token):\n \"\"\"Initializer.\n\n Args:\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`.\n start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.\n end_token: `int32` scalar, the token that marks end of decoding.\n\n Raises:\n ValueError: if `sequence_length` is not a 1D tensor.\n \"\"\"\n if callable(embedding):\n self._embedding_fn = embedding\n else:\n self._embedding_fn = (\n 
lambda ids: embedding_ops.embedding_lookup(embedding, ids))\n\n self._start_tokens = ops.convert_to_tensor(\n start_tokens, dtype=dtypes.int32, name=\"start_tokens\")\n self._end_token = ops.convert_to_tensor(\n end_token, dtype=dtypes.int32, name=\"end_token\")\n if self._start_tokens.get_shape().ndims != 1:\n raise ValueError(\"start_tokens must be a vector\")\n self._batch_size = array_ops.size(start_tokens)\n if self._end_token.get_shape().ndims != 0:\n raise ValueError(\"end_token must be a scalar\")\n self._start_inputs = self._embedding_fn(self._start_tokens)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n def initialize(self, name=None):\n finished = array_ops.tile([False], [self._batch_size])\n return (finished, self._start_inputs)\n\n def sample(self, time, outputs, state, name=None):\n \"\"\"sample for GreedyEmbeddingHelper.\"\"\"\n del time, state # unused by sample_fn\n # Outputs are logits, use argmax to get the most probable id\n if not isinstance(outputs, ops.Tensor):\n raise TypeError(\"Expected outputs to be a single Tensor, got: %s\" %\n outputs)\n sample_ids = math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)\n return sample_ids\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n \"\"\"next_inputs_fn for GreedyEmbeddingHelper.\"\"\"\n del time, outputs # unused by next_inputs_fn\n finished = math_ops.equal(sample_ids, self._end_token)\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished,\n # If we're finished, the next_inputs value doesn't matter\n lambda: self._start_inputs,\n lambda: self._embedding_fn(sample_ids))\n return (finished, next_inputs, state)\n" ]
[ [ "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.embedding_ops.embedding_lookup", "tensorflow.contrib.distributions.python.ops.categorical.Categorical", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.ops.math_ops.argmax", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.array_ops.size", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.scatter_nd", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.array_ops.reshape" ] ]
starcatch1/book
[ "3d4477c2e624e291b2081b944c9589b976211dcf" ]
[ "ch19/day06/07.py" ]
[ "import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndef get_financial_statements(code):\n url = \"http://companyinfo.stock.naver.com/v1/company/ajax/cF1001.aspx?cmp_cd=%s&fin_typ=0&freq_typ=Y\" % (code)\n html = requests.get(url).text\n\n html = html.replace('<th class=\"bg r01c02 endLine line-bottom\"colspan=\"8\">연간</th>', \"\")\n html = html.replace(\"<span class='span-sub'>(IFRS연결)</span>\", \"\")\n html = html.replace(\"<span class='span-sub'>(IFRS별도)</span>\", \"\")\n html = html.replace(\"<span class='span-sub'>(GAAP개별)</span>\", \"\")\n html = html.replace('\\t', '')\n html = html.replace('\\n', '')\n html = html.replace('\\r', '')\n\n for year in range(2009, 2018):\n for month in range(6, 13):\n month = \"/%02d\" % month\n html = html.replace(str(year) + month, str(year))\n\n for month in range(1, 6):\n month = \"/%02d\" % month\n html = html.replace(str(year+1) + month, str(year))\n\n html = html.replace(str(year) + '(E)', str(year))\n\n df_list = pd.read_html(html, index_col='주요재무정보')\n df = df_list[0]\n return df\n\ndef get_3year_treasury():\n url = \"http://www.index.go.kr/strata/jsp/showStblGams3.jsp?stts_cd=288401&amp;idx_cd=2884&amp;freq=Y&amp;period=1998:2016\"\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'lxml')\n tr_data = soup.find_all('tr', id='tr_288401_1')\n td_data = tr_data[0].find_all('td')\n\n treasury_3year = {}\n start_year = 1998\n\n for x in td_data:\n treasury_3year[start_year] = x.text\n start_year += 1\n\n print(treasury_3year)\n return treasury_3year\n\nif __name__ == \"__main__\":\n #df = get_financial_statements('035720')\n #print(df)\n get_3year_treasury()\n\n\n" ]
[ [ "pandas.read_html" ] ]
rpindale/pytorch
[ "6a085648d81ce88ff59d6d1438fdb3707a0d6fb7" ]
[ "test/quantization/core/test_workflow_module.py" ]
[ "# Torch\nimport torch\nfrom torch.quantization import (\n MinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAverageMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver,\n HistogramObserver,\n RecordingObserver,\n PlaceholderObserver,\n NoopObserver,\n FakeQuantize,\n FixedQParamsFakeQuantize,\n default_debug_qconfig,\n default_observer,\n default_histogram_observer,\n default_per_channel_weight_observer,\n get_observer_dict,\n prepare,\n QConfig,\n)\n\n\nimport torch.nn as nn\n\n# Standard library\nimport copy\nimport io\nimport itertools\nimport unittest\nimport math\nimport numpy as np\n\n# Testing utils\nfrom hypothesis import given, settings\nfrom hypothesis import strategies as st\nimport torch.testing._internal.hypothesis_utils as hu\nhu.assert_deadline_disabled()\nfrom torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA\nfrom torch.testing._internal.common_utils import TestCase\nfrom torch.testing._internal.common_quantization import (\n QuantizationTestCase,\n AnnotatedSingleLayerLinearModel,\n test_only_eval_fn,\n SingleLayerLinearModel,\n)\n\nfrom torch.testing._internal.common_quantized import (\n override_quantized_engine,\n supported_qengines,\n override_qengines,\n _fake_quantize_per_channel_affine_reference,\n _fake_quantize_per_channel_affine_grad_reference,\n to_tensor,\n)\n\nNP_RANDOM_SEED = 19\ntolerance = 1e-6\n\nclass TestObserver(QuantizationTestCase):\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),\n reduce_range=st.booleans())\n def test_per_tensor_observers(self, qdtype, qscheme, reduce_range):\n # reduce_range cannot be true for symmetric quantization with uint8\n if qdtype == torch.quint8 and qscheme == torch.per_tensor_symmetric:\n reduce_range = False\n ObserverList = [MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range),\n MovingAverageMinMaxObserver(averaging_constant=0.5,\n dtype=qdtype,\n 
qscheme=qscheme,\n reduce_range=reduce_range)]\n for myobs in ObserverList:\n # Calculate Qparams should return with a warning for observers with no data\n qparams = myobs.calculate_qparams()\n if type(myobs) == MinMaxObserver:\n x = torch.tensor([1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n y = torch.tensor([4.0, 5.0, 5.0, 6.0, 7.0, 8.0])\n else:\n # Moving average of min/max for x and y matches that of\n # extreme values for x/y used for minmax observer\n x = torch.tensor([0.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n y = torch.tensor([2.0, 5.0, 5.0, 6.0, 7.0, 10.0])\n\n result = myobs(x)\n result = myobs(y)\n self.assertEqual(result, y)\n self.assertEqual(myobs.min_val, 1.0)\n self.assertEqual(myobs.max_val, 8.0)\n qparams = myobs.calculate_qparams()\n if reduce_range:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.062745 * 255 / 127\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0313725 * 255 / 127\n ref_zero_point = -64 if qdtype is torch.qint8 else 0\n else:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.062745\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0313725\n ref_zero_point = -128 if qdtype is torch.qint8 else 0\n self.assertEqual(qparams[1].item(), ref_zero_point)\n self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)\n state_dict = myobs.state_dict()\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n loaded_obs = MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)\n loaded_obs.load_state_dict(loaded_dict)\n loaded_qparams = loaded_obs.calculate_qparams()\n self.assertEqual(myobs.min_val, loaded_obs.min_val)\n self.assertEqual(myobs.max_val, loaded_obs.max_val)\n self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())\n\n\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n 
qscheme=st.sampled_from((torch.per_channel_affine, torch.per_channel_symmetric, torch.per_channel_affine_float_qparams)),\n ch_axis=st.sampled_from((0, 1, 2, 3)), reduce_range=st.booleans())\n def test_per_channel_observers(self, qdtype, qscheme, ch_axis, reduce_range):\n # reduce_range cannot be true for symmetric quantization with uint8\n if qscheme == torch.per_channel_affine_float_qparams:\n reduce_range = False\n if qdtype == torch.quint8 and qscheme == torch.per_channel_symmetric:\n reduce_range = False\n ObserverList = [PerChannelMinMaxObserver(reduce_range=reduce_range,\n ch_axis=ch_axis,\n dtype=qdtype,\n qscheme=qscheme),\n MovingAveragePerChannelMinMaxObserver(averaging_constant=0.5,\n reduce_range=reduce_range,\n ch_axis=ch_axis,\n dtype=qdtype,\n qscheme=qscheme)]\n\n for myobs in ObserverList:\n # Calculate qparams should work for empty observers\n qparams = myobs.calculate_qparams()\n x = torch.tensor(\n [\n [[[1.0, 2.0], [2.0, 2.5]], [[3.0, 4.0], [4.5, 6.0]]],\n [[[-4.0, -3.0], [5.0, 5.0]], [[6.0, 3.0], [7.0, 8.0]]],\n ]\n )\n if type(myobs) == MovingAveragePerChannelMinMaxObserver:\n # Scaling the input tensor to model change in min/max values\n # across batches\n result = myobs(0.5 * x)\n result = myobs(1.5 * x)\n self.assertEqual(result, 1.5 * x)\n else:\n result = myobs(x)\n self.assertEqual(result, x)\n\n qparams = myobs.calculate_qparams()\n ref_min_vals = [[1.0, -4.0], [-4.0, 3.0], [-4.0, 2.0], [-4.0, -3.0]]\n ref_max_vals = [[6.0, 8.0], [5.0, 8.0], [6.0, 8.0], [7.0, 8.0]]\n per_channel_symmetric_ref_scales = [\n [0.04705882, 0.06274509],\n [0.03921569, 0.0627451],\n [0.04705882, 0.0627451],\n [0.05490196, 0.0627451],\n ]\n per_channel_affine_ref_scales = [\n [0.02352941, 0.04705882],\n [0.03529412, 0.03137255],\n [0.03921569, 0.03137255],\n [0.04313726, 0.04313726],\n ]\n per_channel_affine_qint8_zp = [\n [-128, -43],\n [-15, -128],\n [-26, -128],\n [-35, -58],\n ]\n per_channel_affine_float_qparams_ref_scales = [\n [0.0196, 0.0471],\n 
[0.0353, 0.0196],\n [0.0392, 0.0235],\n [0.0431, 0.0431],\n ]\n per_channel_affine_quint8_zp = [[0, 85], [113, 0], [102, 0], [93, 70]]\n\n self.assertEqual(myobs.min_vals, ref_min_vals[ch_axis])\n self.assertEqual(myobs.max_vals, ref_max_vals[ch_axis])\n if qscheme == torch.per_channel_symmetric:\n ref_scales = per_channel_symmetric_ref_scales[ch_axis]\n ref_zero_points = [0, 0] if qdtype is torch.qint8 else [128, 128]\n elif qscheme == torch.per_channel_affine_float_qparams:\n ref_scales = per_channel_affine_float_qparams_ref_scales[ch_axis]\n ref_zero_points = [-1 * ref_min_vals[ch_axis][i] / ref_scales[i] for i in range(len(ref_scales))]\n else:\n ref_scales = per_channel_affine_ref_scales[ch_axis]\n ref_zero_points = (\n per_channel_affine_qint8_zp[ch_axis]\n if qdtype is torch.qint8\n else per_channel_affine_quint8_zp[ch_axis]\n )\n\n if reduce_range:\n ref_scales = [s * 255 / 127 for s in ref_scales]\n ref_zero_points = [math.floor(z / 2) for z in ref_zero_points]\n self.assertTrue(torch.allclose(qparams[0], torch.tensor(ref_scales, dtype=qparams[0].dtype), atol=0.0001))\n if qscheme == torch.per_channel_affine_float_qparams:\n self.assertTrue(torch.allclose(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype), atol=1))\n else:\n self.assertTrue(torch.allclose(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype)))\n\n\n # Test for serializability\n state_dict = myobs.state_dict()\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n loaded_obs = PerChannelMinMaxObserver(reduce_range=reduce_range, ch_axis=ch_axis, dtype=qdtype, qscheme=qscheme)\n loaded_obs.load_state_dict(loaded_dict)\n loaded_qparams = loaded_obs.calculate_qparams()\n self.assertEqual(myobs.min_vals, loaded_obs.min_vals)\n self.assertEqual(myobs.max_vals, loaded_obs.max_vals)\n self.assertEqual(myobs.calculate_qparams(), 
loaded_obs.calculate_qparams())\n\n\n def test_observer_scriptable(self):\n obs_list = [MinMaxObserver(), MovingAverageMinMaxObserver()]\n for obs in obs_list:\n scripted = torch.jit.script(obs)\n\n x = torch.rand(3, 4)\n obs(x)\n scripted(x)\n self.assertEqual(obs.calculate_qparams(), scripted.calculate_qparams())\n\n buf = io.BytesIO()\n torch.jit.save(scripted, buf)\n buf.seek(0)\n loaded = torch.jit.load(buf)\n self.assertEqual(obs.calculate_qparams(), loaded.calculate_qparams())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @override_qengines\n def test_state_dict_respects_device_affinity(self):\n \"\"\"\n Tests that loading from a state dict loads buffers to the correct\n device.\n \"\"\"\n device_cpu = torch.device('cpu')\n device_cuda = torch.device('cuda:0')\n test_cases = itertools.product(\n [device_cpu, device_cuda],\n [device_cpu, device_cuda],\n [MinMaxObserver, MovingAverageMinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver,\n # TODO: enable this (separate PR)\n # HistogramObserver,\n PlaceholderObserver, RecordingObserver, NoopObserver,\n FakeQuantize])\n\n for device_source, device_target, obs_cls in test_cases:\n # calibrated source model\n model = obs_cls()\n model.to(device_source)\n model(torch.randn(4, 1, 4, 4, device=device_source))\n # target model\n model2 = obs_cls()\n model2.to(device_target)\n model2.load_state_dict(model.state_dict())\n # verify that buffers stayed on model2's device\n model_devices = {p.device for p in model2.parameters()} | \\\n {p.device for p in model2.buffers()}\n # some observers do not have any buffers, so lessEqual instead of\n # Equal\n self.assertLessEqual(len(model_devices), 1)\n if len(model_devices) == 1:\n model_device = next(iter(model_devices))\n self.assertEqual(model_device, device_target)\n\n def test_histogram_observer_consistent_buffer_shape(self):\n \"\"\"\n Ensures that the buffer 
shapes do not change from uninitialized to\n initialized states for HistogramObserver.\n \"\"\"\n obs = HistogramObserver()\n min_shape_before = obs.min_val.shape\n max_shape_before = obs.max_val.shape\n for _ in range(2):\n obs(torch.randn(4, 4, 4, 4))\n self.assertEqual(min_shape_before, obs.min_val.shape)\n self.assertEqual(max_shape_before, obs.max_val.shape)\n\n def test_histogram_observer_save_load_state_dict(self):\n \"\"\"\n Smoke test on saving/loading state_dict\n \"\"\"\n obs1 = HistogramObserver()\n obs1(torch.randn(4, 4, 4, 4))\n obs2 = HistogramObserver()\n obs2.load_state_dict(obs1.state_dict())\n self.assertEqual(obs2.min_val.shape, torch.Size([]))\n self.assertEqual(obs2.max_val.shape, torch.Size([]))\n\n\n def test_save_load_state_dict_script(self):\n \"\"\"\n Tests that we can save and load state_dict for observers that are scripted\n in a quantized model.\n \"\"\"\n obs_list = [MinMaxObserver, MovingAverageMinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver, HistogramObserver]\n\n for obs in obs_list:\n model = SingleLayerLinearModel().eval()\n qconfig = QConfig(activation=default_observer, weight=obs)\n qconfig_dict = {'' : qconfig}\n scripted = torch.jit.script(model)\n scripted = torch.quantization.prepare_jit(scripted, qconfig_dict)\n x = torch.rand(5, 5)\n scripted(x)\n obs_dict = torch.quantization.get_observer_state_dict(scripted)\n\n # Load stats\n scripted_2 = torch.jit.script(model)\n scripted_2 = torch.quantization.prepare_jit(scripted_2, qconfig_dict)\n torch.quantization.load_observer_state_dict(scripted_2, obs_dict)\n # Verify that state_dict matches exactly with original one.\n self.assertEqual(scripted.state_dict(), scripted_2.state_dict())\n\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_observer_qparams_respects_device_affinity(self):\n \"\"\"\n Ensure that the scale and zero_point returned by the observer\n 
are on the same device as the input tensor.\n \"\"\"\n observerList = [MinMaxObserver(),\n MovingAverageMinMaxObserver(),\n PerChannelMinMaxObserver(),\n MovingAveragePerChannelMinMaxObserver()]\n for obs in observerList:\n device = torch.device('cuda:1')\n x = torch.randn(1, 2, device=device)\n obs.to(device)\n result = obs(x)\n scale, zero_point = obs.calculate_qparams()\n\n self.assertEqual(x.device, scale.device)\n self.assertEqual(x.device, zero_point.device)\n\n def test_zero_numel(self):\n obs_list = [MinMaxObserver, MovingAverageMinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver, HistogramObserver,\n FakeQuantize, FixedQParamsFakeQuantize]\n for obs_cls in obs_list:\n if obs_cls is FixedQParamsFakeQuantize:\n obs = obs_cls(0.1, 0)\n else:\n obs = obs_cls()\n x = torch.tensor([])\n # verify no crash\n x = obs(x)\n\n\n# HistogramObserver that works like it does on master\nclass _ReferenceHistogramObserver(HistogramObserver):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @torch.jit.ignore\n def _non_linear_param_search(self):\n r\"\"\"Non-linear parameter search.\n\n An approximation for L2 error minimization for selecting min/max.\n By selecting new min/max, we filter out outliers in input distribution.\n This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in\n caffe2/quantization/server/norm_minimization.cc\n \"\"\"\n def _get_norm(delta_begin, delta_end, density, norm_type):\n r\"\"\"\n Compute the norm of the values uniformaly distributed between\n delta_begin and delta_end.\n\n norm = density * (integral_{begin, end} x^2)\n = density * (end^3 - begin^3) / 3\n \"\"\"\n assert norm_type == \"L2\", \"Only L2 norms are currently supported\"\n norm = 0.0\n if norm_type == \"L2\":\n norm = (\n delta_end * delta_end * delta_end\n - delta_begin * delta_begin * delta_begin\n ) / 3\n return density * norm\n\n def _compute_quantization_error(next_start_bin, 
next_end_bin, norm_type):\n r\"\"\"\n Compute the quantization error if we use start_bin to end_bin as the\n min and max to do the quantization.\n \"\"\"\n bin_width = (self.max_val.item() - self.min_val.item()) / self.bins\n\n norm = 0.0\n dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins\n if dst_bin_width == 0.0:\n return 0.0\n for src_bin in range(self.bins):\n # distances from the beginning of first dst_bin to the beginning and\n # end of src_bin\n src_bin_begin = (src_bin - next_start_bin) * bin_width\n src_bin_end = src_bin_begin + bin_width\n\n # which dst_bins the beginning and end of src_bin belong to?\n dst_bin_of_begin = min(\n self.dst_nbins - 1, max(0.0, math.floor(src_bin_begin / dst_bin_width))\n )\n dst_bin_of_end = min(\n self.dst_nbins - 1, max(0.0, math.floor(src_bin_end / dst_bin_width))\n )\n dst_bin_of_begin_center = (\n dst_bin_of_begin * dst_bin_width + dst_bin_width / 2\n )\n\n density = self.histogram[src_bin] / bin_width\n if dst_bin_of_begin == dst_bin_of_end:\n # if src_bin is entirely within 1 dst_bin\n delta_begin = src_bin_begin - dst_bin_of_begin_center\n delta_end = src_bin_end - dst_bin_of_begin_center\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n else:\n delta_begin = src_bin_begin - dst_bin_of_begin_center\n delta_end = dst_bin_width / 2\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n\n norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(\n -dst_bin_width / 2, dst_bin_width / 2, density, norm_type\n )\n\n dst_bin_of_end_center = (\n dst_bin_of_end * dst_bin_width + dst_bin_width / 2\n )\n\n delta_begin = -dst_bin_width / 2\n delta_end = src_bin_end - dst_bin_of_end_center\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n return norm\n\n assert self.histogram.size()[0] == self.bins, \"bins mistmatch\"\n bin_width = (self.max_val - self.min_val) / self.bins\n\n # cumulative sum\n total = sum(self.histogram)\n cSum 
= torch.cumsum(self.histogram, dim=0)\n\n stepsize = 1e-5 # granularity\n alpha = 0.0 # lower bound\n beta = 1.0 # upper bound\n start_bin = 0\n end_bin = self.bins - 1\n norm_min = float(\"inf\")\n\n while alpha < beta:\n # Find the next step\n next_alpha = alpha + stepsize\n next_beta = beta - stepsize\n\n # find the left and right bins between the quantile bounds\n l = start_bin\n r = end_bin\n while l < end_bin and cSum[l] < next_alpha * total:\n l = l + 1\n while r > start_bin and cSum[r] > next_beta * total:\n r = r - 1\n\n # decide the next move\n next_start_bin = start_bin\n next_end_bin = end_bin\n if (l - start_bin) > (end_bin - r):\n # move the start bin\n next_start_bin = l\n alpha = next_alpha\n else:\n # move the end bin\n next_end_bin = r\n beta = next_beta\n\n if next_start_bin == start_bin and next_end_bin == end_bin:\n continue\n\n # calculate the quantization error using next_start_bin and next_end_bin\n norm = _compute_quantization_error(next_start_bin, next_end_bin, \"L2\")\n\n if norm > norm_min:\n break\n norm_min = norm\n start_bin = next_start_bin\n end_bin = next_end_bin\n\n new_min = self.min_val + bin_width * start_bin\n new_max = self.min_val + bin_width * (end_bin + 1)\n return new_min, new_max\n\nclass TestRecordHistogramObserver(QuantizationTestCase):\n # TODO: move this to quantize.py\n def test_record_observer(self):\n for qengine in supported_qengines:\n with override_quantized_engine(qengine):\n model = AnnotatedSingleLayerLinearModel()\n model.qconfig = default_debug_qconfig\n model = prepare(model)\n # run the evaluation and dump all tensors\n test_only_eval_fn(model, self.calib_data)\n test_only_eval_fn(model, self.calib_data)\n observer_dict = {}\n get_observer_dict(model, observer_dict)\n\n self.assertTrue('fc1.module.activation_post_process' in observer_dict.keys(),\n 'observer is not recorded in the dict')\n self.assertEqual(len(observer_dict['fc1.module.activation_post_process'].get_tensor_value()),\n 2 * 
len(self.calib_data))\n self.assertEqual(observer_dict['fc1.module.activation_post_process'].get_tensor_value()[0],\n model(self.calib_data[0][0]))\n\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)))\n def test_observer_scriptable(self, qdtype, qscheme):\n obs = RecordingObserver(dtype=qdtype, qscheme=qscheme)\n scripted = torch.jit.script(obs)\n\n x = torch.rand(3, 4)\n obs(x)\n scripted(x)\n self.assertTrue(torch.equal(obs.get_tensor_value()[0], scripted.get_tensor_value()[0]))\n buf = io.BytesIO()\n torch.jit.save(scripted, buf)\n buf.seek(0)\n loaded = torch.jit.load(buf)\n self.assertTrue(torch.equal(obs.get_tensor_value()[0], loaded.get_tensor_value()[0]))\n\nclass TestHistogramObserver(QuantizationTestCase):\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from(\n (torch.per_tensor_affine, torch.per_tensor_symmetric))\n )\n def test_observer_scriptable(self, qdtype, qscheme):\n ob_list = [\n HistogramObserver(dtype=qdtype, qscheme=qscheme),\n default_histogram_observer()\n ]\n for obs in ob_list:\n scripted = torch.jit.script(obs)\n\n x = torch.rand(3, 4)\n obs(x)\n scripted(x)\n self.assertTrue(torch.equal(obs.histogram, scripted.histogram))\n buf = io.BytesIO()\n torch.jit.save(scripted, buf)\n buf.seek(0)\n loaded = torch.jit.load(buf)\n self.assertTrue(torch.equal(obs.histogram, scripted.histogram))\n\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),\n reduce_range=st.booleans())\n @settings(max_examples=10)\n def test_histogram_observer(self, qdtype, qscheme, reduce_range):\n myobs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)\n # Calculate qparams should work for empty observers\n qparams = myobs.calculate_qparams()\n x = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)\n y = 
torch.tensor([5.0, 6.0, 7.0, 8.0])\n out_x = myobs(x)\n self.assertTrue(out_x.requires_grad)\n myobs(y)\n self.assertEqual(myobs.min_val, 2.0)\n self.assertEqual(myobs.max_val, 8.0)\n self.assertEqual(myobs.histogram, [2., 3., 3.])\n\n qparams = myobs.calculate_qparams()\n\n if reduce_range:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.0470588 * 255 / 127\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0235294 * 255 / 127\n ref_zero_point = -64 if qdtype is torch.qint8 else 0\n else:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.0470588\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0235294\n ref_zero_point = -128 if qdtype is torch.qint8 else 0\n\n self.assertEqual(qparams[1].item(), ref_zero_point)\n self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)\n # Test for serializability\n state_dict = myobs.state_dict()\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n loaded_obs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)\n loaded_obs.load_state_dict(loaded_dict)\n loaded_qparams = loaded_obs.calculate_qparams()\n self.assertEqual(myobs.min_val, loaded_obs.min_val)\n self.assertEqual(myobs.max_val, loaded_obs.max_val)\n self.assertEqual(myobs.histogram, loaded_obs.histogram)\n self.assertEqual(myobs.bins, loaded_obs.bins)\n self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())\n\n def test_histogram_observer_one_sided(self):\n myobs = HistogramObserver(bins=8, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)\n x = torch.tensor([0.0, 0.3, 1.2, 1.7])\n y = torch.tensor([0.1, 1.3, 2.0, 2.7])\n myobs(x)\n myobs(y)\n self.assertEqual(myobs.min_val, 0)\n qparams = myobs.calculate_qparams()\n self.assertEqual(qparams[1].item(), 0)\n\n def 
test_histogram_observer_same_inputs(self):\n myobs = HistogramObserver(bins=3, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)\n w = torch.ones(4, requires_grad=True)\n x = torch.zeros(4, requires_grad=True)\n y = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)\n z = torch.tensor([5.0, 6.0, 7.0, 8.0])\n myobs(w)\n myobs(x)\n myobs(x)\n myobs(y)\n myobs(z)\n qparams = myobs.calculate_qparams()\n self.assertEqual(myobs.min_val, 2.0)\n self.assertEqual(myobs.max_val, 8.0)\n self.assertEqual(myobs.histogram, [2., 3., 3.])\n\n @given(N=st.sampled_from([10, 1000]),\n bins=st.sampled_from([256, 512, 1024, 2048]),\n dtype=st.sampled_from([torch.qint8, torch.quint8]),\n qscheme=st.sampled_from([torch.per_tensor_affine, torch.per_tensor_symmetric]),\n reduce_range=st.booleans())\n def test_histogram_observer_against_reference(self, N, bins, dtype, qscheme, reduce_range):\n\n ref_obs = _ReferenceHistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)\n my_obs = HistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)\n\n for _ in range(10):\n X = torch.randn(N)\n my_obs(X)\n ref_obs(X)\n\n ref_qparams = ref_obs.calculate_qparams()\n my_qparams = my_obs.calculate_qparams()\n\n self.assertEqual(ref_qparams, my_qparams)\n\n\nclass TestFakeQuantize(TestCase):\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),\n qparams=hu.qparams(dtypes=torch.qint8)))\n def test_fq_module_per_channel(self, device, X):\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n X.requires_grad_()\n fq_module = FakeQuantize(default_per_channel_weight_observer, quant_min, quant_max, ch_axis=axis).to(device)\n Y_prime = fq_module(X)\n assert fq_module.scale is not None\n assert 
fq_module.zero_point is not None\n Y = _fake_quantize_per_channel_affine_reference(X, fq_module.scale,\n fq_module.zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n # Test backward\n dout = torch.rand_like(X, dtype=torch.float, device=device)\n Y_prime.backward(dout)\n dX = _fake_quantize_per_channel_affine_grad_reference(dout, X, fq_module.scale,\n fq_module.zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n def test_fq_serializable_per_channel(self):\n observer = default_per_channel_weight_observer\n quant_min = -128\n quant_max = 127\n fq_module = FakeQuantize(observer, quant_min, quant_max)\n X = torch.tensor([[-5, -3.5, -2, 0, 3, 5, 7], [1, 3, 2, 5, 6.5, 8, 10]], dtype=torch.float32)\n y_ref = fq_module(X)\n state_dict = fq_module.state_dict()\n self.assertEqual(state_dict['scale'], [0.054902, 0.078431])\n self.assertEqual(state_dict['zero_point'], [0, 0])\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n\ndef _get_buffer_ids(module):\n \"\"\"\n Object addresses stay constant if and only if all modifications are in-place\n \"\"\"\n return [id(v) for k, v in module._buffers.items()]\n\nclass TestDistributed(QuantizationTestCase):\n\n def test_observers_preserve_buffers(self):\n \"\"\"\n Tests that observers only modify buffers in place. 
Note: this is important\n because nn.DataParallel depends on this assumption to work correctly.\n However, DataParallel does not expose IDs of the replicas, so we test it\n without DataParallel in order to easily access the object IDs.\n \"\"\"\n observer_types = [\n torch.quantization.MinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.MovingAverageMinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.PerChannelMinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.MovingAveragePerChannelMinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.HistogramObserver.with_args(dtype=torch.qint8),\n torch.quantization.RecordingObserver.with_args(dtype=torch.qint8),\n torch.quantization.PlaceholderObserver.with_args(dtype=torch.float16),\n ]\n\n for observer_type in observer_types:\n observer = observer_type()\n buffer_ids_before = _get_buffer_ids(observer)\n for _i in range(5):\n inputs = torch.rand((4, 4, 4))\n observer(inputs)\n buffer_ids_after = _get_buffer_ids(observer)\n self.assertEqual(\n buffer_ids_before,\n buffer_ids_after,\n msg=\"{}: Buffers must be modified in place\".format(str(observer)))\n\n def test_fake_quant_preserves_buffers(self):\n \"\"\"\n Tests that fake quant only modifies buffers in place. 
Note: this is important\n because nn.DataParallel depends on this assumption to work correctly.\n However, DataParallel does not expose IDs of the replicas, so we test it\n without DataParallel in order to easily access the object IDs.\n \"\"\"\n model = torch.quantization.FakeQuantize()\n buffer_ids_before = _get_buffer_ids(model)\n for _i in range(5):\n inputs = torch.rand((4, 4, 4))\n model(inputs)\n model.apply(torch.quantization.enable_fake_quant)\n model.apply(torch.quantization.disable_fake_quant)\n model.apply(torch.quantization.enable_observer)\n model.apply(torch.quantization.disable_observer)\n buffer_ids_after = _get_buffer_ids(model)\n self.assertEqual(\n buffer_ids_before,\n buffer_ids_after,\n msg=\"FakeQuant: Buffers must be modified in place\")\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_qat_data_parallel(self):\n \"\"\"\n Tests that doing QAT in nn.DataParallel does not crash.\n \"\"\"\n if 'fbgemm' not in torch.backends.quantized.supported_engines:\n return\n with override_quantized_engine('fbgemm'):\n device = torch.device('cuda')\n\n model = nn.Sequential(\n torch.quantization.QuantStub(),\n nn.Conv2d(3, 1, 1, bias=False),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.Conv2d(1, 2, 3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(2),\n nn.AvgPool2d(14),\n nn.Sigmoid(),\n torch.quantization.DeQuantStub(),\n )\n\n torch.quantization.fuse_modules(model, [['1', '2', '3'], ['4', '5']], inplace=True)\n\n model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n torch.quantization.prepare_qat(model, inplace=True)\n model = nn.DataParallel(model, device_ids=[0, 1])\n model.to(device)\n model.train()\n\n for epoch in range(3):\n inputs = torch.rand(2, 3, 28, 28).to(device)\n model(inputs)\n if epoch >= 1:\n model.apply(torch.quantization.disable_observer)\n if epoch >= 2:\n model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)\n quant_model = 
copy.deepcopy(model.module)\n quant_model = torch.quantization.convert(quant_model.eval().cpu(), inplace=False)\n with torch.no_grad():\n out = quant_model(torch.rand(1, 3, 28, 28))\n\n def test_qat_convbn_fused_syncbn_replacement(self):\n \"\"\"\n Tests that SyncBatchNorm replacement works for fused ConvBN.\n \"\"\"\n if 'fbgemm' not in torch.backends.quantized.supported_engines:\n return\n with override_quantized_engine('fbgemm'):\n # create conv-bn\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.conv = nn.Conv2d(4, 1, 3, padding=1)\n self.bn = nn.BatchNorm2d(1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n model = Model()\n # fuse it\n fused_model = torch.quantization.fuse_modules(\n model,\n [['conv', 'bn']],\n )\n # convert to QAT\n fused_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n torch.quantization.prepare_qat(fused_model, inplace=True)\n # replace with DDP\n fused_model = nn.SyncBatchNorm.convert_sync_batchnorm(fused_model)\n self.assertTrue(\n isinstance(fused_model.conv.bn, nn.SyncBatchNorm),\n \"Expected BN to be converted to SyncBN\")\n\n def test_syncbn_preserves_qconfig(self):\n \"\"\"\n Makes sure that if a BatchNorm is not fused and a qconfig exists,\n convering the module to SyncBatchNorm preserves the qconfig.\n \"\"\"\n m = nn.Sequential(\n nn.Conv2d(1, 1, 1),\n nn.BatchNorm2d(1),\n )\n m[1].qconfig = torch.quantization.default_qconfig\n m = torch.nn.SyncBatchNorm.convert_sync_batchnorm(m)\n self.assertTrue(\n hasattr(m[1], \"qconfig\"),\n \"missing qconfig after SyncBatchNorm conversion\")\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @override_qengines\n def test_device_affinity(self):\n \"\"\"\n Tests that converting a model to QAT respects device affinity\n \"\"\"\n class Model(nn.Module):\n\n def __init__(self):\n super(Model, self).__init__()\n self.conv = nn.Conv2d(1, 
1, 1)\n self.bn = nn.BatchNorm2d(1)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n model = Model()\n model.qconfig = torch.quantization.get_default_qat_qconfig(torch.backends.quantized.engine)\n device = torch.device('cuda:0')\n model.to(device)\n torch.quantization.prepare_qat(model, inplace=True)\n model_devices = {p.device for p in model.parameters()} | \\\n {p.device for p in model.buffers()}\n self.assertEqual(len(model_devices), 1)\n model_device = next(iter(model_devices))\n self.assertEqual(model_device, device)\n\n # ensure that running an input on CUDA works without any needed changes\n input = torch.randn(4, 1, 4, 4, device=device)\n model(input)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_quantization.py TESTNAME\\n\\n\"\n \"instead.\")\n" ]
[ [ "torch.quantization.RecordingObserver", "torch.quantization.HistogramObserver", "torch.jit.script", "torch.rand_like", "torch.rand", "torch.quantization.load_observer_state_dict", "numpy.random.seed", "torch.no_grad", "torch.cumsum", "torch.cuda.is_available", "torch.jit.save", "torch.nn.Conv2d", "torch.quantization.fuse_modules", "torch.jit.load", "torch.nn.Sigmoid", "torch.quantization.QuantStub", "torch.testing._internal.common_quantization.SingleLayerLinearModel", "torch.testing._internal.hypothesis_utils.array_shapes", "torch.quantization.PerChannelMinMaxObserver.with_args", "torch.nn.BatchNorm2d", "torch.quantization.MovingAveragePerChannelMinMaxObserver", "torch.quantization.PerChannelMinMaxObserver", "torch.randn", "torch.iinfo", "torch.save", "torch.quantization.get_default_qconfig", "torch.quantization.MovingAveragePerChannelMinMaxObserver.with_args", "torch.quantization.get_default_qat_qconfig", "torch.nn.AvgPool2d", "torch.quantization.MinMaxObserver", "torch.nn.DataParallel", "torch.quantization.FakeQuantize", "torch.device", "torch.quantization.prepare_qat", "torch.quantization.MovingAverageMinMaxObserver.with_args", "torch.ones", "torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_reference", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.load", "torch.quantization.MovingAverageMinMaxObserver", "torch.quantization.get_observer_state_dict", "torch.quantization.RecordingObserver.with_args", "torch.testing._internal.hypothesis_utils.qparams", "torch.tensor", "torch.testing._internal.hypothesis_utils.assert_deadline_disabled", "torch.testing._internal.common_quantization.AnnotatedSingleLayerLinearModel", "torch.quantization.PlaceholderObserver.with_args", "torch.quantization.prepare", "torch.quantization.QConfig", "torch.quantization.DeQuantStub", "torch.testing._internal.common_quantization.test_only_eval_fn", "torch.quantization.default_histogram_observer", "torch.testing._internal.common_quantized.to_tensor", 
"torch.Size", "torch.testing._internal.common_quantized.override_quantized_engine", "torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_grad_reference", "torch.quantization.MinMaxObserver.with_args", "torch.equal", "torch.quantization.get_observer_dict", "torch.quantization.prepare_jit", "torch.quantization.HistogramObserver.with_args", "torch.zeros", "torch.nn.ReLU" ] ]
karthiksekaran/mlprojects
[ "a16a4adb20e559b54a78f4e6fd26da520b5ea851" ]
[ "Decision Tree - Churn.py" ]
[ "#import modules\r\nimport pandas # for dataframes\r\nimport matplotlib.pyplot as plt # for plotting graphs\r\nimport seaborn as sns # for plotting graphs\r\n\r\ndata=pandas.read_csv('HR_comma_sep.csv')\r\n\r\n# Import LabelEncoder\r\nfrom sklearn import preprocessing\r\n#creating labelEncoder\r\nle = preprocessing.LabelEncoder()\r\n# Converting string labels into numbers.\r\ndata['salary']=le.fit_transform(data['salary'])\r\ndata['Departments ']=le.fit_transform(data['Departments '])\r\n\r\n#Spliting data into Feature and\r\nX=data[['satisfaction_level', 'last_evaluation', 'number_project',\r\n 'average_montly_hours', 'time_spend_company', 'Work_accident',\r\n 'promotion_last_5years', 'Departments ', 'salary']]\r\ny=data['left']\r\n# Import train_test_split function\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Split dataset into training set and test set\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) # 70% training and 30% test\r\n#Import Gradient Boosting Classifier model\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\n\r\n#Create Gradient Boosting Classifier\r\ngb = GradientBoostingClassifier()\r\n\r\n#Train the model using the training sets\r\ngb.fit(X_train, y_train)\r\n\r\n#Predict the response for test dataset\r\ny_pred = gb.predict(X_test)\r\n\r\n#Import scikit-learn metrics module for accuracy calculation\r\nfrom sklearn import metrics\r\n# Model Accuracy, how often is the classifier correct?\r\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\r\n# Model Precision\r\nprint(\"Precision:\",metrics.precision_score(y_test, y_pred))\r\n# Model Recall\r\nprint(\"Recall:\",metrics.recall_score(y_test, y_pred))" ]
[ [ "pandas.read_csv", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_score", "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.recall_score", "sklearn.ensemble.GradientBoostingClassifier", "sklearn.model_selection.train_test_split" ] ]
i-ivanova/Explorify
[ "54d14eedb121bb8cb972f86c807c7fe4ae174ac1" ]
[ "pipeline/jonatan_pipeline.py" ]
[ "import json\nimport pandas as pd\nimport numpy as np\nimport pprint\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom jonatan_settings import data_path, write_final_df, write_dr_results, audio_features\nfrom jonatan_scrape import get_track_data, get_artist_data, get_audio_feature_data\nfrom jonatan_dr import compute_dr_results\n\n\ndef read_streaming_history():\n with open(data_path + \"StreamingHistory0.json\", mode=\"r\", encoding=\"utf-8\") as f:\n data = json.loads(f.read())\n\n df = pd.DataFrame(data)\n df.endTime = pd.to_datetime(df.endTime)\n return df\n\ndef create_full_df(streamingHistory):\n track_data = get_track_data(streamingHistory)\n artist_data = get_artist_data(streamingHistory)\n track_features = get_audio_feature_data(track_data)\n\n # related_artists = pd.Series(scrape_related_artists(artist_data.artist_id), name=\"id\")\n \n merged = pd.merge(streamingHistory, artist_data, left_on='artistName', right_on='artist_name', how='inner')\n print(f\"\\tlost {streamingHistory.shape[0] - merged.shape[0]} entries when merging with artist_data\")\n print(streamingHistory[~streamingHistory.artistName.isin(merged.artistName)])\n \n merged = pd.merge(merged, track_data, left_on=[\"artistName\", \"trackName\"], right_index=True, how=\"left\")\n \n merged = pd.merge(merged, track_features, left_on=\"track_id\", right_index=True, how=\"left\")\n\n if write_final_df:\n keep_columns = list(streamingHistory.columns) \\\n + [\"artist_genres\", \"artist_id\", \"artist_popularity\", \"track_duration_ms\", \"track_id\", \"track_popularity\"]\n write_df = merged[keep_columns]\n json_str = write_df.to_json(orient=\"records\")\n with open(data_path + \"merged_history.json\", mode=\"w+\", encoding=\"utf-8\") as f:\n f.write(json_str)\n\n return merged\n\ndef get_dr_results(merged):\n for col in audio_features:\n merged[col] = merged[col].transform(float)\n merged[audio_features] = StandardScaler().fit_transform(merged[audio_features]) # Alternative: use 
MinMaxScaler to fit in specific range like [0, 1]\n\n artist_data = get_artist_data(merged)\n\n # drop entries where features are missing\n nan_entries = merged.danceability.isna()\n print(f\"\\tlost {nan_entries.sum()} entries when droppping entries missing features\")\n print(merged[nan_entries])\n merged = merged[~nan_entries]\n\n dr_results = compute_dr_results(merged, artist_data)\n\n if write_dr_results:\n dr_results = dr_results.replace([np.nan], [None])\n json_str = dr_results.to_json(orient=\"records\")\n with open(data_path + \"dr_results.json\", mode=\"w+\", encoding=\"utf-8\") as f:\n f.write(json_str)\n \n return dr_results\n\ndef main():\n print(\"Starting up pipeline...\")\n print(\"Reading streaming history...\")\n streamingHistory = read_streaming_history()\n print(\"Constructing complete dataset...\")\n merged_df = create_full_df(streamingHistory)\n print(\"Performing dimensionality reduction...\")\n dr_results = get_dr_results(merged_df)\n print(\"COMPLETE!\")\n\nif __name__ == '__main__':\n main()" ]
[ [ "sklearn.preprocessing.StandardScaler", "pandas.to_datetime", "pandas.DataFrame", "pandas.merge" ] ]
toandaominh1997/understanding_cloud_organization
[ "7da991ff3da557c18f4585c1b956ed799c104c7c" ]
[ "utils/metric.py" ]
[ "import torch \nimport numpy as np \n\n\nclass AverageMetric(object):\n def __init__(self, threshold=0.5):\n self.dice_scores = []\n self.threshold = threshold\n def update(self, outputs, labels):\n with torch.no_grad():\n probs = torch.sigmoid(outputs)\n dice_score = self.dice_metric(probability=probs, truth = labels)\n self.dice_scores.append(dice_score)\n def value(self):\n return np.mean(self.dice_scores)\n def reset(self):\n self.dice_scores = []\n def dice_metric(self, probability, truth):\n probability = torch.sigmoid(probability)\n batch_size = len(truth)\n with torch.no_grad():\n probability = probability.view(batch_size, -1)\n truth = truth.view(batch_size, -1)\n assert(probability.shape == truth.shape) \n p = (probability > self.threshold).float() \n t = (truth > 0.5).float() \n t_sum = t.sum(-1)\n p_sum = p.sum(-1)\n neg_index = torch.nonzero(t_sum == 0)\n pos_index = torch.nonzero(t_sum >= 1)\n dice_neg = (p_sum == 0).float()\n dice_pos = 2 * (p*t).sum(-1)/((p+t).sum(-1))\n dice_neg = dice_neg[neg_index]\n dice_pos = dice_pos[pos_index]\n dice = torch.cat([dice_pos, dice_neg]) \n dice = dice.mean().item()\n return dice\n \n" ]
[ [ "torch.nonzero", "torch.no_grad", "torch.sigmoid", "torch.cat", "numpy.mean" ] ]
n3urovirtual/EyeTracking_Experiment_Children
[ "56c08a1202a686ac8f05d3c4e53766537895fe4f" ]
[ "Learning Scripts/Learn_PT.py" ]
[ "\"\"\"APPLY PYTHAGOREAN THEOREM IN LEARNING DATA + SMOOTH VELOCITIES\"\"\"\n\nimport os\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\nfrom helper import img_id, sub_id, TRIALS_PATH\n\n# Apply PT into smoothed learning data to find sample-to-sample distance:\n\nfor file in os.listdir(TRIALS_PATH):\n dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))\n x = dataset[\"BPOGX\"].diff().fillna(0).to_numpy()\n y = dataset[\"BPOGY\"].diff().fillna(0).to_numpy()\n sample_2_sample_distance = (x ** 2 + y ** 2) ** 0.5\n dataset[\"Distance\"] = np.nan_to_num(sample_2_sample_distance)\n dataset[\"Time\"] = dataset[\"TIME\"].diff().fillna(0).to_numpy()\n dataset[\"Velocity_px\"] = dataset[\"Distance\"] / dataset[\"Time\"]\n dataset[\"Velocity_deg\"] = dataset[\"Velocity_px\"] * 0.021\n dataset[\"Velocity_deg\"] = dataset[\"Velocity_deg\"].fillna(0)\n dataset = dataset[dataset[\"Velocity_deg\"] != 0]\n vel = dataset[\"Velocity_deg\"]\n sav_vel = savgol_filter(vel, 13, 2)\n dataset[\"Smoothed_Velocity_deg\"] = sav_vel.tolist()\n fix_or_sac = dataset[\"Smoothed_Velocity_deg\"] > 120\n dataset[\"Fix_or_Sac\"] = np.where(fix_or_sac, \"Sac\", \"Fix\")\n write_f = dataset[dataset[\"Smoothed_Velocity_deg\"] < 1000]\n write_f.to_csv(os.path.join(TRIALS_PATH, file), index=False)\n\n\n# Plot smoothed velocity vs. 
unsmoothed velocity\nfor k, i in itertools.product(sub_id, img_id):\n try:\n file = (\n \"Sub_\" + str(k) + \"_Image_\" + i.split(\".\")[0] + \"_Block_4.csv\"\n ) # Block 1,2,3,4\n dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 11))\n fig.suptitle(\n f'Subject:{str(k)} , Image:{i.split(\".\")[0]}, Block: 4', size=30\n ) # Block 1,2,3,4\n time = dataset[\"Time\"].cumsum()\n smoothed_velocity1 = dataset[\"Velocity_deg\"]\n smoothed_velocity2 = dataset[\"Smoothed_Velocity_deg\"]\n ax1.plot(time, smoothed_velocity1)\n ax1.set_ylim([0, 1000])\n ax1.set_title(\"Unsmoothed velocity\", size=15)\n ax2.plot(time, smoothed_velocity2)\n ax2.set_ylim([0, 1000])\n ax2.set_title(\"Smoothed velocity\", size=15)\n # plt.axhline(90, color='red')\n # plt.title(f'Subject:{str(k)} , Image:{i.split(\".\")[0]} , Block: 1')\n ax2.axhline(120, color=\"red\")\n fig.text(\n 0.5, \n 0.04, \n \"Time (in seconds)\", \n ha=\"center\", \n va=\"center\", \n fontsize=22\n )\n fig.text(\n 0.08,\n 0.5,\n \"Velocity (deg/sec.)\",\n ha=\"center\",\n va=\"center\",\n rotation=\"vertical\",\n fontsize=22,\n )\n plt.show()\n plt.close()\n except OSError:\n continue\n\n#Plot to fine-tune the velocity threshold\nfor k, i in itertools.product(sub_id, img_id):\n file = (\n \"Sub_\" + str(k) + \"_Image_\" + i.split(\".\")[0] + \"_Block_1.csv\"\n ) # Block 1,2,3,4\n dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))\n time = dataset[\"Time\"].cumsum().fillna(0)\n velocity = dataset[\"Smoothed_Velocity_deg\"]\n plt.plot(time, velocity)\n plt.axhline(100, color=\"red\")\n plt.ylim(0, 1000)\n plt.title(f\"Subject:{str(k)} , Image:{str(i)}\")\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Velocity values\")\n plt.show()\n plt.close()\n" ]
[ [ "scipy.signal.savgol_filter", "matplotlib.pyplot.axhline", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "matplotlib.pyplot.plot", "numpy.where", "numpy.nan_to_num", "matplotlib.pyplot.xlabel" ] ]
the-timoye/spark-examples
[ "d8784653a862e09e1c755ed2055d37a5516d2c3f" ]
[ "data_lakes.py" ]
[ "import pandas as pd\nimport pyspark.sql.functions as F\nfrom pyspark.sql import SparkSession\n\nimport sparknlp\nsparknlp.start()\nfrom sparknlp.pretrained import PretrainedPipeline\n\npd.set_option('max_colwidth', 800)\nspark = SparkSession.builder.config(\"spark.jars.packages\", \"com.johnsnowlabs.nlp:spark-nlp_2.12:3.0.3\").getOrCreate()\nspark\n\ndata_path = 'data/reddit-worldnews.json'\ndf = spark.read.json(data_path)\n\nprint('dataframe count = {}'.format(df.count()))\n\ntitle = 'data.title'\nauthor = 'data.author'\n\nprint('============== AUTHOR Vs TITLE ==============')\ndf_author_title = df.select(title, author)\nprint(df_author_title.limit(10).toPandas())\n\nprint('============== WORD COUNT ==============')\ndf_word_count = df.select(F.explode(F.split(title, '\\\\s+')).alias(\"word\")).groupBy(\"word\").count().sort(F.desc('count'))\nprint(df_word_count.limit(20).toPandas())\n\nprint('============== ANNOTATED DATAFRAME SCHEMA ==============')\nexplain_document_pipeline = PretrainedPipeline(\"explain_document_ml\")\ndf_annotated = explain_document_pipeline.annotate(df_author_title, \"title\")\ndf_annotated.printSchema()\n\nprint('============== QUERY MAPPEED TYPE SUB-FIELDS ==============')\ndf_check_data = df_annotated.select([\"text\", \"pos.metadata\", \"pos.result\"])\nprint(df_check_data.limit(10).toPandas())\n\n\n# extract POS from the annotated dataframe\ndf_pos = df_annotated.select(F.explode(\"pos\").alias(\"pos\"))\nprint(df_pos.toPandas())\ndf_pos.printSchema()\n\nprint('============== VIEW ONLY PROPER NOUNS ==============')\ndf_pos_nouns = df_pos.where(\"pos.result = 'NNP' OR pos.result = 'NNPS'\")\ndf_nouns = df_pos_nouns.selectExpr([\"pos.metadata['word'] AS word\", \"pos.result AS part_of_speech\"])\nprint(df_nouns.limit(10).toPandas())\n\nprint('============== VIEW MOST USED NOUNS==============')\ndf_common_nouns = df_nouns.groupBy(\"word\").count().sort(F.desc(\"count\"))\nprint(df_common_nouns.toPandas())" ]
[ [ "pandas.set_option" ] ]
jjaramillo34/pyimagesearchuniversity_course
[ "0a4a26c29a6f8122f6a03d3393ac01ebbc14a391", "0a4a26c29a6f8122f6a03d3393ac01ebbc14a391" ]
[ "Aumented Reality 101/Fiducials and Markers/opencv-generate-aruco/opencv_generate_aruco.py", "OpenCV 102/Basic Image Processing Operations/auto-canny/auto_canny_practice.py" ]
[ "# USAGE\n# python opencv_generate_aruco.py --id 24 --type DICT_5X5_100 --output tags/DICT_5X5_100_id24.png\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport cv2\nimport sys\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-o\", \"--output\", required=True,\n\thelp=\"path to output image containing ArUCo tag\")\nap.add_argument(\"-i\", \"--id\", type=int, required=True,\n\thelp=\"ID of ArUCo tag to generate\")\nap.add_argument(\"-t\", \"--type\", type=str,\n\tdefault=\"DICT_ARUCO_ORIGINAL\",\n\thelp=\"type of ArUCo tag to generate\")\nargs = vars(ap.parse_args())\n\n# define names of each possible ArUco tag OpenCV supports\nARUCO_DICT = {\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\n\t\"DICT_5X5_1000\": cv2.aruco.DICT_5X5_1000,\n\t\"DICT_6X6_50\": cv2.aruco.DICT_6X6_50,\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\n}\n\n# verify that the supplied ArUCo tag exists and is supported by\n# OpenCV\nif ARUCO_DICT.get(args[\"type\"], None) is None:\n\tprint(\"[INFO] ArUCo tag of '{}' is not supported\".format(\n\t\targs[\"type\"]))\n\tsys.exit(0)\n\n# load the ArUCo 
dictionary\narucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[args[\"type\"]])\n\n# allocate memory for the output ArUCo tag and then draw the ArUCo\n# tag on the output image\nprint(\"[INFO] generating ArUCo tag type '{}' with ID '{}'\".format(\n\targs[\"type\"], args[\"id\"]))\ntag = np.zeros((300, 300, 1), dtype=\"uint8\")\ncv2.aruco.drawMarker(arucoDict, args[\"id\"], 300, tag, 1)\n\n# write the generated ArUCo tag to disk and then display it to our\n# screen\ncv2.imwrite(args[\"output\"], tag)\ncv2.imshow(\"ArUCo Tag\", tag)\ncv2.waitKey(0)", "# USAGE\n# python auto_canny_practice.py --images images\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport glob\nimport cv2\n\ndef auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 - sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n \n # return the edged image\n return edged\n\n# construct the argument parser and the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--images\", type=str, required=True,\n\thelp=\"path to the input image\")\nargs = vars(ap.parse_args())\n\n# loop over the images\nfor imagePath in glob.glob(args[\"images\"] + \"/*.jpg\"):\n # convert the image to grayscale and blur it slightly\n image = cv2.imread(imagePath)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (3, 3), 0)\n \n # apply Canny edge detection using a wide threshold, tight threshold, \n # and automatically determined threshold\n wide = cv2.Canny(blurred, 10, 200)\n tight = cv2.Canny(blurred, 225, 250)\n auto = auto_canny(blurred)\n \n # show the images\n cv2.imshow(\"Original\", image)\n cv2.imshow(\"Edges\", np.hstack([wide, tight, auto]))\n cv2.waitKey(0)" ]
[ [ "numpy.zeros" ], [ "numpy.hstack", "numpy.median" ] ]
lnmdlong/mmdetection
[ "87768a5d0a0188d46c50b575b417e9ec2fb5c06c" ]
[ "mmdet/models/roi_heads/mask_heads/fcn_mask_head.py" ]
[ "import os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer\nfrom mmcv.ops.carafe import CARAFEPack\nfrom mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import mask_target\nfrom mmdet.models.builder import HEADS, build_loss\n\nBYTES_PER_FLOAT = 4\n# TODO: This memory limit may be too much or too little. It would be better to\n# determine it based on available resources.\nGPU_MEM_LIMIT = 1024**3 # 1 GB memory limit\n\n\[email protected]_module()\nclass FCNMaskHead(BaseModule):\n\n def __init__(self,\n num_convs=4,\n roi_feat_size=14,\n in_channels=256,\n conv_kernel_size=3,\n conv_out_channels=256,\n num_classes=80,\n class_agnostic=False,\n upsample_cfg=dict(type='deconv', scale_factor=2),\n conv_cfg=None,\n norm_cfg=None,\n predictor_cfg=dict(type='Conv'),\n loss_mask=dict(\n type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n init_cfg=None):\n assert init_cfg is None, 'To prevent abnormal initialization ' \\\n 'behavior, init_cfg is not allowed to be set'\n super(FCNMaskHead, self).__init__(init_cfg)\n self.upsample_cfg = upsample_cfg.copy()\n if self.upsample_cfg['type'] not in [\n None, 'deconv', 'nearest', 'bilinear', 'carafe'\n ]:\n raise ValueError(\n f'Invalid upsample method {self.upsample_cfg[\"type\"]}, '\n 'accepted methods are \"deconv\", \"nearest\", \"bilinear\", '\n '\"carafe\"')\n self.num_convs = num_convs\n # WARN: roi_feat_size is reserved and not used\n self.roi_feat_size = _pair(roi_feat_size)\n self.in_channels = in_channels\n self.conv_kernel_size = conv_kernel_size\n self.conv_out_channels = conv_out_channels\n self.upsample_method = self.upsample_cfg.get('type')\n self.scale_factor = self.upsample_cfg.pop('scale_factor', None)\n self.num_classes = num_classes\n self.class_agnostic = class_agnostic\n self.conv_cfg = conv_cfg\n 
self.norm_cfg = norm_cfg\n self.predictor_cfg = predictor_cfg\n self.fp16_enabled = False\n self.loss_mask = build_loss(loss_mask)\n\n self.convs = ModuleList()\n for i in range(self.num_convs):\n in_channels = (\n self.in_channels if i == 0 else self.conv_out_channels)\n padding = (self.conv_kernel_size - 1) // 2\n self.convs.append(\n ConvModule(\n in_channels,\n self.conv_out_channels,\n self.conv_kernel_size,\n padding=padding,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n upsample_in_channels = (\n self.conv_out_channels if self.num_convs > 0 else in_channels)\n upsample_cfg_ = self.upsample_cfg.copy()\n if self.upsample_method is None:\n self.upsample = None\n elif self.upsample_method == 'deconv':\n upsample_cfg_.update(\n in_channels=upsample_in_channels,\n out_channels=self.conv_out_channels,\n kernel_size=self.scale_factor,\n stride=self.scale_factor)\n self.upsample = build_upsample_layer(upsample_cfg_)\n elif self.upsample_method == 'carafe':\n upsample_cfg_.update(\n channels=upsample_in_channels, scale_factor=self.scale_factor)\n self.upsample = build_upsample_layer(upsample_cfg_)\n else:\n # suppress warnings\n align_corners = (None\n if self.upsample_method == 'nearest' else False)\n upsample_cfg_.update(\n scale_factor=self.scale_factor,\n mode=self.upsample_method,\n align_corners=align_corners)\n self.upsample = build_upsample_layer(upsample_cfg_)\n\n out_channels = 1 if self.class_agnostic else self.num_classes\n logits_in_channel = (\n self.conv_out_channels\n if self.upsample_method == 'deconv' else upsample_in_channels)\n self.conv_logits = build_conv_layer(self.predictor_cfg,\n logits_in_channel, out_channels, 1)\n self.relu = nn.ReLU(inplace=True)\n self.debug_imgs = None\n\n def init_weights(self):\n super(FCNMaskHead, self).init_weights()\n for m in [self.upsample, self.conv_logits]:\n if m is None:\n continue\n elif isinstance(m, CARAFEPack):\n m.init_weights()\n else:\n nn.init.kaiming_normal_(\n m.weight, mode='fan_out', 
nonlinearity='relu')\n nn.init.constant_(m.bias, 0)\n\n @auto_fp16()\n def forward(self, x):\n for conv in self.convs:\n x = conv(x)\n if self.upsample is not None:\n x = self.upsample(x)\n if self.upsample_method == 'deconv':\n x = self.relu(x)\n mask_pred = self.conv_logits(x)\n return mask_pred\n\n def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n pos_assigned_gt_inds = [\n res.pos_assigned_gt_inds for res in sampling_results\n ]\n mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\n gt_masks, rcnn_train_cfg)\n return mask_targets\n\n @force_fp32(apply_to=('mask_pred', ))\n def loss(self, mask_pred, mask_targets, labels):\n \"\"\"\n Example:\n >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA\n >>> N = 7 # N = number of extracted ROIs\n >>> C, H, W = 11, 32, 32\n >>> # Create example instance of FCN Mask Head.\n >>> # There are lots of variations depending on the configuration\n >>> self = FCNMaskHead(num_classes=C, num_convs=1)\n >>> inputs = torch.rand(N, self.in_channels, H, W)\n >>> mask_pred = self.forward(inputs)\n >>> sf = self.scale_factor\n >>> labels = torch.randint(0, C, size=(N,))\n >>> # With the default properties the mask targets should indicate\n >>> # a (potentially soft) single-class label\n >>> mask_targets = torch.rand(N, H * sf, W * sf)\n >>> loss = self.loss(mask_pred, mask_targets, labels)\n >>> print('loss = {!r}'.format(loss))\n \"\"\"\n loss = dict()\n if mask_pred.size(0) == 0:\n loss_mask = mask_pred.sum()\n else:\n if self.class_agnostic:\n loss_mask = self.loss_mask(mask_pred, mask_targets,\n torch.zeros_like(labels))\n else:\n loss_mask = self.loss_mask(mask_pred, mask_targets, labels)\n loss['loss_mask'] = loss_mask\n return loss\n\n def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,\n ori_shape, scale_factor, rescale):\n \"\"\"Get segmentation masks from mask_pred and bboxes.\n\n Args:\n 
mask_pred (Tensor or ndarray): shape (n, #class, h, w).\n For single-scale testing, mask_pred is the direct output of\n model, whose type is Tensor, while for multi-scale testing,\n it will be converted to numpy array outside of this method.\n det_bboxes (Tensor): shape (n, 4/5)\n det_labels (Tensor): shape (n, )\n rcnn_test_cfg (dict): rcnn testing config\n ori_shape (Tuple): original image height and width, shape (2,)\n scale_factor(float | Tensor): If ``rescale is True``, box\n coordinates are divided by this scale factor to fit\n ``ori_shape``.\n rescale (bool): If True, the resulting masks will be rescaled to\n ``ori_shape``.\n\n Returns:\n list[list]: encoded masks. The c-th item in the outer list\n corresponds to the c-th class. Given the c-th outer list, the\n i-th item in that inner list is the mask for the i-th box with\n class label c.\n\n Example:\n >>> import mmcv\n >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA\n >>> N = 7 # N = number of extracted ROIs\n >>> C, H, W = 11, 32, 32\n >>> # Create example instance of FCN Mask Head.\n >>> self = FCNMaskHead(num_classes=C, num_convs=0)\n >>> inputs = torch.rand(N, self.in_channels, H, W)\n >>> mask_pred = self.forward(inputs)\n >>> # Each input is associated with some bounding box\n >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)\n >>> det_labels = torch.randint(0, C, size=(N,))\n >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })\n >>> ori_shape = (H * 4, W * 4)\n >>> scale_factor = torch.FloatTensor((1, 1))\n >>> rescale = False\n >>> # Encoded masks are a list for each category.\n >>> encoded_masks = self.get_seg_masks(\n >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,\n >>> scale_factor, rescale\n >>> )\n >>> assert len(encoded_masks) == C\n >>> assert sum(list(map(len, encoded_masks))) == N\n \"\"\"\n if not isinstance(mask_pred, torch.Tensor):\n mask_pred = det_bboxes.new_tensor(mask_pred)\n\n device = mask_pred.device\n cls_segms = [[] for _ in 
range(self.num_classes)\n ] # BG is not included in num_classes\n bboxes = det_bboxes[:, :4]\n labels = det_labels\n # No need to consider rescale and scale_factor while exporting to ONNX\n if torch.onnx.is_in_onnx_export():\n img_h, img_w = ori_shape[:2]\n else:\n if rescale:\n img_h, img_w = ori_shape[:2]\n else:\n if isinstance(scale_factor, float):\n img_h = np.round(ori_shape[0] * scale_factor).astype(\n np.int32)\n img_w = np.round(ori_shape[1] * scale_factor).astype(\n np.int32)\n else:\n w_scale, h_scale = scale_factor[0], scale_factor[1]\n img_h = np.round(ori_shape[0] * h_scale.item()).astype(\n np.int32)\n img_w = np.round(ori_shape[1] * w_scale.item()).astype(\n np.int32)\n scale_factor = 1.0\n\n if not isinstance(scale_factor, (float, torch.Tensor)):\n scale_factor = bboxes.new_tensor(scale_factor)\n bboxes = bboxes / scale_factor\n\n # support exporting to ONNX\n if torch.onnx.is_in_onnx_export():\n threshold = rcnn_test_cfg.mask_thr_binary\n if not self.class_agnostic:\n box_inds = torch.arange(mask_pred.shape[0])\n mask_pred = mask_pred[box_inds, labels][:, None]\n masks, _ = _do_paste_mask(\n mask_pred, bboxes, img_h, img_w, skip_empty=False)\n if threshold >= 0:\n masks = (masks >= threshold).to(dtype=torch.bool)\n else:\n # TensorRT backend does not have data type of uint8\n is_trt_backend = os.environ.get(\n 'ONNX_BACKEND') == 'MMCVTensorRT'\n target_dtype = torch.int32 if is_trt_backend else torch.uint8\n masks = (masks * 255).to(dtype=target_dtype)\n return masks\n\n N = len(mask_pred)\n # The actual implementation split the input into chunks,\n # and paste them chunk by chunk.\n if device.type == 'cpu':\n # CPU is most efficient when they are pasted one by one with\n # skip_empty=True, so that it performs minimal number of\n # operations.\n num_chunks = N\n else:\n # GPU benefits from parallelism for larger chunks,\n # but may have memory issue\n num_chunks = int(\n np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))\n assert 
(num_chunks <=\n N), 'Default GPU_MEM_LIMIT is too small; try increasing it'\n chunks = torch.chunk(torch.arange(N, device=device), num_chunks)\n\n threshold = rcnn_test_cfg.mask_thr_binary\n im_mask = torch.zeros(\n N,\n img_h,\n img_w,\n device=device,\n dtype=torch.bool if threshold >= 0 else torch.uint8)\n\n if not self.class_agnostic:\n mask_pred = mask_pred[range(N), labels][:, None]\n\n for inds in chunks:\n masks_chunk, spatial_inds = _do_paste_mask(\n mask_pred[inds],\n bboxes[inds],\n img_h,\n img_w,\n skip_empty=device.type == 'cpu')\n\n if threshold >= 0:\n masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)\n else:\n # for visualization and debugging\n masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)\n\n im_mask[(inds, ) + spatial_inds] = masks_chunk\n\n if torch.jit.is_tracing():\n return im_mask.detach().int()\n\n for i in range(N):\n cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())\n return cls_segms\n\n\ndef _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):\n \"\"\"Paste instance masks according to boxes.\n\n This implementation is modified from\n https://github.com/facebookresearch/detectron2/\n\n Args:\n masks (Tensor): N, 1, H, W\n boxes (Tensor): N, 4\n img_h (int): Height of the image to be pasted.\n img_w (int): Width of the image to be pasted.\n skip_empty (bool): Only paste masks within the region that\n tightly bound all boxes, and returns the results this region only.\n An important optimization for CPU.\n\n Returns:\n tuple: (Tensor, tuple). The first item is mask tensor, the second one\n is the slice object.\n If skip_empty == False, the whole image will be pasted. 
It will\n return a mask of shape (N, img_h, img_w) and an empty tuple.\n If skip_empty == True, only area around the mask will be pasted.\n A mask of shape (N, h', w') and its start and end coordinates\n in the original image will be returned.\n \"\"\"\n # On GPU, paste all masks together (up to chunk size)\n # by using the entire image to sample the masks\n # Compared to pasting them one by one,\n # this has more operations but is faster on COCO-scale dataset.\n device = masks.device\n if skip_empty:\n x0_int, y0_int = torch.clamp(\n boxes.min(dim=0).values.floor()[:2] - 1,\n min=0).to(dtype=torch.int32)\n x1_int = torch.clamp(\n boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)\n y1_int = torch.clamp(\n boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)\n else:\n x0_int, y0_int = 0, 0\n x1_int, y1_int = img_w, img_h\n x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1\n\n N = masks.shape[0]\n\n img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5\n img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5\n img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n # img_x, img_y have shapes (N, w), (N, h)\n # IsInf op is not supported with ONNX<=1.7.0\n if not torch.onnx.is_in_onnx_export():\n if torch.isinf(img_x).any():\n inds = torch.where(torch.isinf(img_x))\n img_x[inds] = 0\n if torch.isinf(img_y).any():\n inds = torch.where(torch.isinf(img_y))\n img_y[inds] = 0\n\n gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))\n gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))\n grid = torch.stack([gx, gy], dim=3)\n\n img_masks = F.grid_sample(\n masks.to(dtype=torch.float32), grid, align_corners=False)\n\n if skip_empty:\n return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))\n else:\n return img_masks[:, 0], ()\n" ]
[ [ "torch.onnx.is_in_onnx_export", "torch.nn.init.kaiming_normal_", "torch.stack", "numpy.ceil", "torch.split", "torch.nn.init.constant_", "numpy.round", "torch.zeros_like", "torch.isinf", "torch.jit.is_tracing", "torch.arange", "torch.zeros", "torch.nn.ReLU", "torch.nn.modules.utils._pair" ] ]
symphonylyh/transformers
[ "03e5d5196ca76008b60da9bb6d604e6bdbcba0db" ]
[ "tests/t5/test_modeling_tf_t5.py" ]
[ "# coding=utf-8\n# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom transformers import T5Config, is_tf_available\nfrom transformers.file_utils import cached_property\nfrom transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model\n\n\nclass TFT5ModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 13\n self.seq_length = 7\n self.is_training = True\n self.use_input_mask = True\n self.use_labels = True\n self.vocab_size = 99\n self.n_positions = 14\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.d_ff = 37\n self.relative_attention_num_buckets = 8\n self.dropout_rate = 0.1\n self.initializer_factor = 0.002\n self.eos_token_id = 1\n self.pad_token_id = 0\n self.scope = None\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n token_labels = None\n if self.use_labels:\n token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n config = T5Config(\n vocab_size=self.vocab_size,\n n_positions=self.n_positions,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n decoder_start_token_id=self.pad_token_id,\n )\n\n return (config, input_ids, input_mask, token_labels)\n\n def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels):\n model = TFT5Model(config=config)\n inputs = {\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n result = model(inputs)\n\n result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids)\n decoder_output = result.last_hidden_state\n decoder_past = result.past_key_values\n encoder_output = result.encoder_last_hidden_state\n self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])\n self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])\n # There should be `num_layers` key value embeddings stored in decoder_past[1]\n self.parent.assertEqual(len(decoder_past), config.num_layers)\n # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past[1] tuple\n self.parent.assertEqual(len(decoder_past[0]), 4)\n\n def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels):\n model = TFT5ForConditionalGeneration(config=config)\n inputs_dict = {\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n \"decoder_attention_mask\": 
input_mask,\n }\n\n result = model(inputs_dict)\n\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask):\n model = TFT5Model(config=config).get_decoder()\n\n input_ids = input_ids[:1, :]\n self.batch_size = 1\n\n # first forward pass\n outputs = model(input_ids, use_cache=True)\n\n outputs_use_cache_conf = model(input_ids)\n outputs_no_past = model(input_ids, use_cache=False)\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n\n output_from_no_past = model(next_input_ids)[0]\n output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0]\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]\n output_from_past_slice = output_from_past[:, 0, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_t5_decoder_model_attention_mask_past(\n self, config, input_ids, decoder_input_ids, attention_mask\n ):\n model = TFT5Model(config=config).get_decoder()\n\n # create attention mask\n half_seq_length = self.seq_length // 2\n attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)\n attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)\n attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)\n\n # first forward pass\n outputs = model(input_ids, attention_mask=attn_mask, 
use_cache=True)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)\n vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)\n condition = tf.transpose(\n tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))\n )\n input_ids = tf.where(condition, random_other_next_tokens, input_ids)\n\n # append to next input_ids and attn_mask\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n attn_mask = tf.concat(\n [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],\n axis=1,\n )\n\n # get two different outputs\n output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0]\n output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]\n output_from_past_slice = output_from_past[:, 0, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_t5_decoder_model_past_large_inputs(\n self, config, input_ids, decoder_input_ids, attention_mask\n ):\n model = TFT5Model(config=config).get_decoder()\n\n input_ids = input_ids[:1, :]\n attention_mask = attention_mask[:1, :]\n self.batch_size = 1\n\n # first forward pass\n outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_attn_mask = 
ids_tensor((self.batch_size, 3), 2)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)\n\n output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]\n output_from_past = model(\n next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values\n )[0]\n\n self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]\n output_from_past_slice = output_from_past[:, :, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_t5_xla_generate(self, config, input_ids, *args):\n config.eos_token_id = None\n config.max_length = 10\n config.do_sample = False\n config.num_beams = 1\n model = TFT5ForConditionalGeneration(config=config)\n\n # make sure there are no pad tokens in prompt\n input_ids = tf.where(input_ids != config.pad_token_id, input_ids, config.pad_token_id + 5)\n\n generated = model.generate(input_ids)\n\n generate_xla = tf.function(model.generate, jit_compile=True)\n generated_xla = generate_xla(input_ids)\n\n self.parent.assertListEqual(generated.numpy().tolist(), generated_xla.numpy().tolist())\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (config, input_ids, input_mask, token_labels) = config_and_inputs\n inputs_dict = {\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n return config, inputs_dict\n\n\n@require_tf\nclass TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):\n\n is_encoder_decoder = True\n all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if 
is_tf_available() else ()\n all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else ()\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFT5ModelTester(self)\n self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_t5_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_model(*config_and_inputs)\n\n def test_t5_model_v1_1(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n config = config_and_inputs[0]\n config.tie_word_embeddings = False\n config.feed_forward_proj = \"gated-gelu\"\n self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:])\n\n def test_with_lm_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)\n\n def test_t5_decoder_model_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs)\n\n def test_t5_decoder_model_past_with_attn_mask(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs)\n\n def test_t5_decoder_model_past_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs)\n\n def test_t5_model_xla_generate(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_xla_generate(*config_and_inputs)\n\n def test_model_common_attributes(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n assert 
isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)\n\n if model_class in self.all_generative_model_classes:\n x = model.get_output_embeddings()\n assert isinstance(x, tf.keras.layers.Layer)\n name = model.get_bias()\n assert name is None\n else:\n x = model.get_output_embeddings()\n assert x is None\n name = model.get_bias()\n assert name is None\n\n def test_saved_model_creation(self):\n # This test is too long (>30sec) and makes fail the CI\n pass\n\n @slow\n def test_model_from_pretrained(self):\n model = TFT5Model.from_pretrained(\"t5-small\")\n self.assertIsNotNone(model)\n\n def test_generate_with_headmasking(self):\n # TODO: Fix head-masking according to PyTorch T5 model\n pass\n\n @slow\n def test_resize_embeddings(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n original_vocab_size = model.get_input_embeddings().weight.shape[0]\n # the vocab size is defined in the model config\n self.assertEqual(original_vocab_size, model.config.vocab_size)\n\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n tokenizer.add_special_tokens({\"bos_token\": \"\", \"eos_token\": \"\"})\n model._resize_token_embeddings(len(tokenizer))\n # the vocab size is now resized to the length of the tokenizer, which is different from the original size\n self.assertEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))\n self.assertNotEqual(model.get_input_embeddings().weight.shape[0], original_vocab_size)\n\n\nclass TFT5EncoderOnlyModelTester:\n def __init__(\n self,\n parent,\n vocab_size=99,\n batch_size=13,\n encoder_seq_length=7,\n # For common tests\n use_attention_mask=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n d_ff=37,\n relative_attention_num_buckets=8,\n is_training=False,\n dropout_rate=0.1,\n initializer_factor=0.002,\n is_encoder_decoder=False,\n eos_token_id=1,\n pad_token_id=0,\n scope=None,\n ):\n\n self.parent = parent\n self.batch_size = batch_size\n self.encoder_seq_length = 
encoder_seq_length\n # For common tests\n self.seq_length = self.encoder_seq_length\n self.use_attention_mask = use_attention_mask\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.d_ff = d_ff\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.initializer_factor = initializer_factor\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.is_encoder_decoder = is_encoder_decoder\n self.scope = None\n self.is_training = is_training\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)\n\n attention_mask = None\n if self.use_attention_mask:\n attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)\n\n config = T5Config(\n vocab_size=self.vocab_size,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n is_encoder_decoder=self.is_encoder_decoder,\n )\n\n return (\n config,\n input_ids,\n attention_mask,\n )\n\n def create_and_check_model(\n self,\n config,\n input_ids,\n attention_mask,\n ):\n model = TFT5EncoderModel(config=config)\n result = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n )\n result = model(input_ids=input_ids)\n encoder_output = result.last_hidden_state\n\n self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n 
(\n config,\n input_ids,\n attention_mask,\n ) = config_and_inputs\n\n inputs_dict = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n }\n return config, inputs_dict\n\n\nclass TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase):\n is_encoder_decoder = False\n all_model_classes = (TFT5EncoderModel,) if is_tf_available() else ()\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFT5EncoderOnlyModelTester(self)\n self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n # is not able to be part of a pipeline\n def test_train_pipeline_custom_model(self):\n pass\n\n\n@require_tf\n@require_sentencepiece\n@require_tokenizers\nclass TFT5GenerationIntegrationTests(unittest.TestCase):\n @slow\n def test_greedy_xla_generate_simple(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n sentence = \"Translate English to German: Today is a beautiful day.\"\n input_ids = tokenizer(sentence, return_tensors=\"tf\", padding=True).input_ids\n\n xla_generate = tf.function(model.generate, jit_compile=True)\n\n output_ids = model.generate(input_ids)\n output_ids_xla = xla_generate(input_ids)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True)\n\n expected_output_string = [\"Heute ist ein schöner Tag.\"]\n\n self.assertListEqual(expected_output_string, output_strings)\n self.assertListEqual(expected_output_string, output_strings_xla)\n\n @slow\n def test_greedy_generate(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n 
sentences = [\"Yesterday, my name was\", \"Today is a beautiful day and\"]\n input_ids = tokenizer(sentences, return_tensors=\"tf\", padding=True).input_ids\n\n generation_kwargs = {\n \"bad_words_ids\": [tokenizer(\"my\").input_ids, tokenizer(\"ein schöner\").input_ids],\n \"no_repeat_ngram_size\": 3,\n \"do_sample\": False,\n \"repetition_penalty\": 2.2,\n }\n\n output_ids = model.generate(input_ids, **generation_kwargs)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n expected_output_string = [\"Yesterday, my name was\", \"Heute ist ein schöne Tag und\"]\n\n self.assertListEqual(expected_output_string, output_strings)\n\n @slow\n def test_sample_generate(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n sentences = [\"I really love my\", \"Translate English to German: the transformers are truly amazing\"]\n input_ids = tokenizer(sentences, return_tensors=\"tf\", padding=True).input_ids\n\n generation_kwargs = {\n \"do_sample\": True,\n \"bad_words_ids\": [tokenizer(\"my\").input_ids, tokenizer(\"ein schöner\").input_ids],\n \"no_repeat_ngram_size\": 3,\n \"repetition_penalty\": 2.2,\n \"temperature\": 0.8,\n \"top_k\": 500,\n \"top_p\": 0.9,\n }\n\n # forces the generation to happen on CPU, to avoid GPU-related quirks\n with tf.device(\":/CPU:0\"):\n tf.random.set_seed(42) # deterministic sampling sequence -> deterministic generation\n output_ids = model.generate(input_ids, **generation_kwargs)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n expected_output_string = [\"i love her I really love my heart\", \"die Transformatoren sind wirklich erstaunlich\"]\n\n self.assertListEqual(expected_output_string, output_strings)\n\n\n@require_tf\n@require_sentencepiece\n@require_tokenizers\nclass TFT5ModelIntegrationTests(unittest.TestCase):\n @cached_property\n def model(self):\n return 
TFT5ForConditionalGeneration.from_pretrained(\"t5-base\")\n\n @slow\n def test_small_integration_test(self):\n \"\"\"\n For comparision run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_t5_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"tf\").input_ids\n\n loss = model(input_ids, labels=labels).loss\n mtf_score = -tf.math.reduce_sum(loss).numpy()\n\n EXPECTED_SCORE = -19.0845\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_small_v1_1_integration_test(self):\n \"\"\"\n For comparision run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_t5_v1.1_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1.1_checkpoint, batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = TFT5ForConditionalGeneration.from_pretrained(\"google/t5-v1_1-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"google/t5-v1_1-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n labels = tokenizer(\"Hi I am\", 
return_tensors=\"tf\").input_ids\n\n loss = model(input_ids, labels=labels).loss\n mtf_score = -tf.math.reduce_sum(loss).numpy()\n\n EXPECTED_SCORE = -59.0293\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_small_byt5_integration_test(self):\n \"\"\"\n For comparision run:\n >>> import t5 # pip install t5==0.9.1\n\n >>> path_to_byt5_small_checkpoint = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None)\n >>> vocab = t5.data.ByteVocabulary()\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = TFT5ForConditionalGeneration.from_pretrained(\"google/byt5-small\")\n tokenizer = ByT5Tokenizer.from_pretrained(\"google/byt5-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"tf\").input_ids\n\n loss = model(input_ids, labels=labels).loss\n mtf_score = -tf.math.reduce_sum(loss).numpy()\n\n EXPECTED_SCORE = -60.7397\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_summarization(self):\n model = self.model\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n\n FRANCE_ARTICLE = 'Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that \"so far no videos were used in the crash investigation.\" He added, \"A person who has such a video needs to immediately give it to the investigators.\" Robin\\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. 
The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. \"One can hear cries of \\'My God\\' in several languages,\" Paris Match reported. \"Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing.\" \"It is a very disturbing scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France\\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were \"completely wrong\" and \"unwarranted.\" Cell phones have been collected at the site, he said, but that they \"hadn\\'t been exploited yet.\" Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical \"no.\" Reichelt told \"Erin Burnett: Outfront\" that he had watched the video and stood by the report, saying Bild and Paris Match are \"very confident\" that the clip is real. He noted that investigators only revealed they\\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. \"That is something we did not know before. ... Overall we can say many things of the investigation weren\\'t revealed by the investigation at the beginning,\" he said. 
What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a \"previous episode of severe depression,\" the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a \"swift and seamless clarification\" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\\' personal belongings will start Wednesday, Menichini said. 
Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and \"held all the licenses required.\" Earlier, a spokesman for the prosecutor\\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\\'s license. Kumpa emphasized there\\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was \"a big part of his life,\" the source said, it\\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\\'s more to the story, said Brian Russell, a forensic psychologist. 
\"Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\\'t going to keep doing their job and they\\'re upset about that and so they\\'re suicidal,\" he said. \"But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\\'s problems.\" Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa\n\n SHORTER_ARTICLE = '(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed \"in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014.\" Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\\'s ceremony, said it was a move toward greater justice. 
\"As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice,\" he said, according to an ICC news release. \"Indeed, today brings us closer to our shared goals of justice and peace.\" Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. \"As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly,\" she said. Rights group Human Rights Watch welcomed the development. \"Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\\'s treaty should speak out to welcome its membership,\" said Balkees Jarrah, international justice counsel for the group. \"What\\'s objectionable is the attempts to undermine international justice, not Palestine\\'s decision to join a treaty to which over 100 countries around the world are members.\" In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it \"strongly\" disagreed with the court\\'s decision. \"As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC,\" the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. \"We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,\" it said. 
But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as \"Palestine.\" While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would \"conduct its analysis in full independence and impartiality.\" The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'\n\n IRAN_ARTICLE = \"(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. 
Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. 
Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \\\"advise and consent.\\\" But the issue is not suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions.\"\n\n ARTICLE_SUBWAY = 'New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. 
A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared \"I do\" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her \"first and only\" marriage. Barrientos, now 39, is facing two criminal counts of \"offering a false instrument for filing in the first degree,\" referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\\'s Investigation Division. Seven of the men are from so-called \"red-flagged\" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. 
Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.'\n\n expected_summaries = [\n 'prosecutor: \"so far no videos were used in the crash investigation\" two magazines claim to have found a cell phone video of the final seconds . \"one can hear cries of \\'My God\\' in several languages,\" one magazine says .',\n \"the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a preliminary examination into the situation in the occupied Palestinian territory . as members of the court, Palestinians may be subject to counter-charges as well .\",\n \"the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller: the debate that has already begun since the announcement of the new framework will likely result in more heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and implement a rigorous inspection regime .\",\n 'prosecutors say the marriages were part of an immigration scam . 
if convicted, barrientos faces two criminal counts of \"offering a false instrument for filing in the first degree\" she has been married 10 times, with nine of her marriages occurring between 1999 and 2002 .',\n ]\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n summarization_config = task_specific_config.get(\"summarization\", {})\n model.config.update(summarization_config)\n\n dct = tok(\n [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]],\n max_length=512,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"tf\",\n )\n self.assertEqual(512, dct[\"input_ids\"].shape[1])\n\n hypotheses_batch = model.generate(\n input_ids=dct[\"input_ids\"],\n attention_mask=dct[\"attention_mask\"],\n num_beams=4,\n length_penalty=2.0,\n max_length=142,\n min_length=56,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n\n decoded = [\n tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch\n ]\n\n self.assertListEqual(\n expected_summaries,\n decoded,\n )\n\n @slow\n def test_translation_en_to_de(self):\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n model = self.model\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n translation_config = task_specific_config.get(\"translation_en_to_de\", {})\n self.model.config.update(translation_config)\n\n original_input = '\"Luigi often said to me that he never wanted the brothers to end up in court\", she wrote.'\n expected_translation = (\n '\"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen\", schrieb sie.'\n )\n\n input_ids = tok.encode(model.config.prefix + original_input, return_tensors=\"tf\")\n\n output = model.generate(\n input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=50,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], 
skip_special_tokens=True, clean_up_tokenization_spaces=False)\n\n self.assertEqual(translation, expected_translation)\n\n @slow\n def test_translation_en_to_fr(self):\n model = self.model\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n translation_config = task_specific_config.get(\"translation_en_to_fr\", {})\n model.config.update(translation_config)\n\n en_text = ' This image section from an infrared recording by the Spitzer telescope shows a \"family portrait\" of countless generations of stars: the oldest stars are seen as blue dots. '\n\n new_truncated_translation = (\n \"Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre \"\n \"un \"\n \"« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées \"\n \"sous forme \"\n \"de points bleus.\"\n )\n\n input_ids = tok(model.config.prefix + en_text, return_tensors=\"tf\").input_ids\n\n output = model.generate(\n input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=100,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n\n self.assertEqual(translation, new_truncated_translation)\n\n @slow\n def test_translation_en_to_ro(self):\n model = self.model\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n translation_config = task_specific_config.get(\"translation_en_to_ro\", {})\n model.config.update(translation_config)\n\n original_input = \"Taco Bell said it plans to add 2,000 locations in the US by 2022.\"\n expected_translation = \"Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022.\"\n\n input_ids = tok.encode(model.config.prefix + original_input, return_tensors=\"tf\")\n\n output = model.generate(\n 
input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=50,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n\n self.assertEqual(translation, expected_translation)\n\n def test_finetune_keras_trainer(self):\n \"\"\"Ensure that the model can be fine-tuned via the keras API and\n that metrics work as expected.\n \"\"\"\n\n # This metric expects to be called with the logits output\n def _accuracy(y_true, y_pred):\n return tf.keras.metrics.sparse_categorical_crossentropy(y_true[:, 0], y_pred[:, 0])\n\n # measure the accuracy of the first token\n class FirstTokenAccuracy(tf.keras.metrics.MeanMetricWrapper):\n def __init__(self, name=\"accuracy\", **kwargs):\n super().__init__(_accuracy, name=name, **kwargs)\n\n model = self.model\n model.compile(\"adam\", metrics=FirstTokenAccuracy())\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n examples = [\n (\"sentiment: Everything is awesome!\", \"positive\"),\n (\"sentiment: Tensorflow datasets are hard to use\", \"negative\"),\n ]\n\n inputs = dict(tokenizer([x[0] for x in examples], padding=True, return_tensors=\"tf\"))\n inputs[\"labels\"] = tokenizer([x[1] for x in examples], return_tensors=\"tf\").input_ids\n\n model.fit(inputs)\n m = model.evaluate(inputs)\n self.assertEqual(len(m), 2)\n" ]
[ [ "tensorflow.debugging.assert_near", "tensorflow.zeros", "tensorflow.math.reduce_sum", "tensorflow.function", "tensorflow.range", "tensorflow.ones", "tensorflow.device", "tensorflow.expand_dims", "tensorflow.where", "tensorflow.concat", "tensorflow.keras.metrics.sparse_categorical_crossentropy", "tensorflow.random.set_seed" ] ]
SantiagoJN/spatialaudiogen
[ "5092b8988731f9704914beb44c5688a819508ade" ]
[ "pyutils/iolib/audio.py" ]
[ "import os\nimport scipy.signal\nimport numpy as np\nfrom soundfile import SoundFile\nfrom pyutils.iolib.video import getFFprobeMeta\nfrom pyutils.cmd import runSystemCMD\n# from scikits.audiolab import Sndfile, Format\nimport tempfile\nimport resampy\n# import librosa\n\n\ndef load_wav(fname, rate=None):\n # fp = Sndfile(fname, 'r')\n fp = SoundFile(fname, 'r')\n #_signal = fp.read_frames(fp.nframes)\n _signal = fp.buffer_read(dtype=\"int32\")\n _signal = np.asarray(_signal).reshape((-1, fp.channels))\n _rate = fp.samplerate\n\n if _signal.ndim == 1:\n _signal.reshape((-1, 1))\n if rate is not None and rate != _rate:\n # _num_frames = _signal.shape[0]\n # _duration = _num_frames / float(_rate)\n # signal = scipy.signal.resample(_signal, int(rate * _duration))\n signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_fast')\n else:\n signal = _signal\n rate = _rate\n\n return signal, rate\n\ndef save_wav(fname, signal, rate):\n fp = SoundFile(fname, 'w', rate, signal.shape[1])\n #fp.write(fname, signal, rate)\n #print(f'########################fp: {fp}')\n fp.write(signal)\n # with SoundFile(fname, 'w', rate, signal.shape[1], 'PCM_24') as f:\n # f.write(signal)\n #fp.close()\n\n # Intento 3\n # y, sr = librosa.load(librosa.util.example_audio_file(), duration=5.0)\n # librosa.output.write_wav(fname, signal, rate)\n # fp = SoundFile(fname, 'w', rate, signal.shape[1])\n # # d, sr = fp.read()\n # fp.write(signal)\n\n # Intento 4\n \n\n\ndef convert2wav(inp_fn, out_fn, rate=None):\n cmd = ['ffmpeg', '-y',\n '-i', inp_fn,\n '-map', '0:a',\n '-acodec', 'pcm_s16le']\n if rate is not None:\n cmd += ['-ar', str(rate),]\n cmd += [out_fn]\n\n stdout, stderr = runSystemCMD(' '.join(cmd))\n if any([l.startswith('Output file is empty,')\n for l in stderr.split('\\n')]):\n raise (ValueError, 'Output file is empty.\\n' + stderr)\n\n\nclass AudioReader:\n def __init__(self, fn, rate=None, pad_start=0, seek=None, duration=None, rotation=None):\n fp = Sndfile(fn, 'r') 
if fn.endswith('.wav') else None\n if fp is None or (rate is not None and fp.samplerate != rate):\n # Convert to wav file\n if not os.path.isdir('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/'):\n os.makedirs('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/')\n snd_file = tempfile.NamedTemporaryFile('w', prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.wav', delete=False)\n snd_file.close()\n\n convert2wav(fn, snd_file.name, rate)\n self.snd_fn = snd_file.name\n self.rm_flag = True\n\n else:\n self.snd_fn = fn\n self.rm_flag = False\n\n self.fp = Sndfile(self.snd_fn, 'r')\n self.num_channels = self.fp.channels\n self.rate = self.fp.samplerate\n self.num_frames = self.fp.nframes\n self.duration = self.num_frames / float(self.rate)\n\n self.k = 0\n self.pad = pad_start\n\n if seek is not None and seek > 0:\n num_frames = int(seek * self.rate)\n self.fp.read_frames(num_frames)\n else:\n seek = 0\n\n if duration is not None:\n self.duration = min(duration, self.duration-seek)\n self.num_frames = int(self.duration * self.rate)\n\n if rotation is not None:\n assert self.num_channels > 2 # Spatial audio\n assert -np.pi <= rotation < np.pi\n c = np.cos(rotation)\n s = np.sin(rotation)\n rot_mtx = np.array([[1, 0, 0, 0], # W' = W\n [0, c, 0, s], # Y' = X sin + Y cos\n [0, 0, 1, 0], # Z' = Z\n [0, -s, 0, c]]) # X' = X cos - Y sin\n self.rot_mtx = rot_mtx\n else:\n self.rot_mtx = None\n\n def __del__(self):\n if self.rm_flag:\n os.remove(self.snd_fn)\n\n def get_chunk(self, n=1, force_size=False):\n if self.k >= self.num_frames:\n return None\n\n frames_left = self.num_frames - self.k\n if force_size and n > frames_left:\n return None\n\n # Pad zeros to start\n if self.pad > 0:\n pad_size = min(n, self.pad)\n pad_chunk = np.zeros((pad_size, self.num_channels))\n n -= pad_size\n self.pad -= pad_size\n else:\n pad_chunk = None\n\n # Read frames\n chunk_size = min(n, frames_left)\n chunk = 
self.fp.read_frames(chunk_size)\n chunk = chunk.reshape((chunk.shape[0], self.num_channels))\n self.k += chunk_size\n\n if pad_chunk is not None:\n chunk = np.concatenate((pad_chunk.astype(chunk.dtype), chunk), 0)\n\n if self.rot_mtx is not None:\n chunk = np.dot(chunk, self.rot_mtx.T)\n\n return chunk\n\n def loop_chunks(self, n=1, force_size=False):\n while True:\n chunk = self.get_chunk(n, force_size=False)\n if chunk is None:\n break\n yield chunk\n\nclass AudioReader2:\n def __init__(self, audio_folder, rate=None,\n seek=0, duration=None, rotation=None):\n self.audio_folder = audio_folder\n\n fns = os.listdir(audio_folder)\n self.num_files = len(fns)\n\n # fp = Sndfile(os.path.join(self.audio_folder, fns[0]), 'r')\n fp = SoundFile(os.path.join(self.audio_folder, fns[0]), 'r')\n data, fps = load_wav(os.path.join(self.audio_folder, fns[0]))\n self.rate = float(fp.samplerate) if rate is not None else fps\n self.num_channels = fp.channels\n self.duration = self.num_files\n self.num_frames = int(self.duration * rate)\n\n self.cur_frame = int(seek * self.rate)\n self.time = self.cur_frame / self.rate\n\n self.max_time = self.duration\n if duration is not None:\n self.max_time = min(seek + duration, self.max_time)\n\n if rotation is not None:\n assert self.num_channels > 2 # Spatial audio\n assert -np.pi <= rotation < np.pi\n c = np.cos(rotation)\n s = np.sin(rotation)\n rot_mtx = np.array([[1, 0, 0, 0], # W' = W\n [0, c, 0, s], # Y' = X sin + Y cos\n [0, 0, 1, 0], # Z' = Z\n [0, -s, 0, c]]) # X' = X cos - Y sin\n self.rot_mtx = rot_mtx\n else:\n self.rot_mtx = None\n\n def get(self, start_time, size):\n index = range(int(start_time), int(start_time + size / self.rate) + 1)\n fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))\n for i in index]\n chunk = []\n for fn in fns:\n if not os.path.exists(fn):\n return None\n data, _ = load_wav(fn, self.rate)\n chunk.append(data)\n\n chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]\n ss = 
int((start_time - int(start_time)) * self.rate)\n chunk = chunk[ss:ss+size, :]\n\n return chunk\n\n def get_chunk(self, n=1, force_size=False):\n if self.time >= self.max_time:\n return None\n\n frames_left = int((self.max_time - self.time) * self.rate)\n if force_size and n > frames_left:\n return None\n\n # Read frames\n chunk_size = min(n, frames_left)\n start_time = self.cur_frame / self.rate\n end_frame_no = self.cur_frame + chunk_size - 1\n end_time = end_frame_no / self.rate\n\n index = range(int(start_time), int(end_time) + 1)\n fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))\n for i in index]\n chunk = []\n for fn in fns:\n data, _ = load_wav(fn, self.rate)\n chunk.append(data)\n chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]\n ss = int((self.time - int(self.time)) * self.rate)\n chunk = chunk[ss:ss+chunk_size, :]\n self.cur_frame += chunk.shape[0]\n self.time = self.cur_frame / self.rate\n\n if self.rot_mtx is not None:\n chunk = np.dot(chunk, self.rot_mtx.T)\n\n return chunk\n\n def loop_chunks(self, n=1, force_size=False):\n while True:\n chunk = self.get_chunk(n, force_size=False)\n if chunk is None:\n break\n yield chunk\n\n\ndef test_audio_reader():\n reader = AudioReader2('/gpu2_data/morgado/spatialaudiogen/youtube/train/687gkvLi5kI/ambix',\n rate=10000, seek=0, duration=5.5)\n for s in reader.loop_chunks(10000):\n print(s.shape), s.max(), s.min()\n# test_audio_reader()\n\n" ]
[ [ "numpy.zeros", "numpy.concatenate", "numpy.asarray", "numpy.cos", "numpy.array", "numpy.sin", "numpy.dot" ] ]
mruffalo/epiScanpy
[ "bcb86347d2b8451c384f97162625c8d5efb27ffc" ]
[ "episcanpy/preprocessing/_readimpute.py" ]
[ "import numpy as np\nimport anndata as ad\nimport pandas as pd\n\ndef load_met_noimput(matrix_file, path='', save=False):\n \"\"\"\n read the raw count matrix and convert it into an AnnData object.\n write down the matrix as .h5ad (AnnData object) if save = True.\n Return AnnData object\n \"\"\"\n matrix = []\n cell_names = []\n feature_names = []\n with open(path+matrix_file) as f:\n line = f.readline()[:-2].split('\\t')\n if line[0] == 'sample_name':\n feature_names = line[1:]\n else:\n matrix.append(line[1:])\n cell_names.append(line[0])\n if matrix == []:\n line = f.readline()[:-2].split('\\t')\n matrix.append(line[1:])\n cell_names.append(line[0])\n for line in f:\n line = line[:-2].split('\\t')\n matrix.append(line[1:])\n cell_names.append(line[0])\n\n matrix = np.array(matrix)\n \n if feature_names != []:\n adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names), var=pd.DataFrame(index=feature_names))\n else:\n adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names))\n \n adata.uns['omic'] = 'methylation'\n adata.uns['imputation'] = 'no_imputation'\n \n if save:\n adata.write(\"\".join([\".\".split(matrix_file)[0],'.h5ad']))\n \n return(adata)\n\ndef imputation_met(adata, number_cell_covered=10, imputation_value='mean', save=None, copy=False):\n \"\"\"\n Impute missing values in methyaltion level matrices. The imputsation is based on the average\n methylation value of the given variable.\n It also filter out variables that are covered in an unsufficient number of cells in order to \n reduce the feature space to meaningful variables and discard potential coverage biases. 
\n\n Parameters\n ----------\n adata: AnnData object containing 'nan'\n\n number_cell_covered: minimum number of cells to be covered in order to retain a variable\n\n imputation_value: imputation of the missing value can be made either on the mean or the median\n\n Return\n ------\n Return a new AnnData object\n\n \n \n \"\"\"\n\n # This step need to be sped up and could be multithread.\n # Only the mean available for now. And only the minimum number of cells covered and not the variety of the \n # methylation levels\n # also, it odes not return the variable annoations and force to add 2 values\n old_features = adata.var_names.tolist()\n \n new_matrix = []\n new_features_name = []\n means = []\n medians = []\n feat_nb = 0\n\n length1 = len(adata.X[0,:])\n length2 = len(adata.X[:,0])\n adata.obs['coverage_cells'] = [length1 - np.isnan(line).sum() for line in adata.X]\n adata.obs['mean_cell_methylation'] = [np.nansum(line)/length1 for line in adata.X]\n adata.var['coverage_feature'] = [length2 - np.isnan(line).sum() for line in adata.X.T]\n adata.var['mean_feature_methylation'] = [np.nansum(line)/length2 for line in adata.X.T]\n\n adata2 = adata[:, adata.var['coverage_feature']>=number_cell_covered].copy()\n\n for index in range(len(adata2.var_names.tolist())):\n adata2.X[:,index] = np.nan_to_num(adata2.X[:,index], nan=adata2.var['mean_feature_methylation'][index])\n\n\n if save!= None:\n adata2.write(save.rstrip('.h5ad')+'.h5ad')\n if copy==False:\n adata = adata2.copy()\n else:\n return(adata2)\n\n\n\n\ndef readandimputematrix(file_name, min_coverage=1):\n \"\"\"\n Temporary function to load and impute methyaltion count matrix into an AnnData object\n \n Parameters\n ----------\n file_name : file name to read and load\n \n min_coverage : minimum number of cells covered for which we keep and impute a variable\n \n Returns\n -------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n \n \"\"\"\n with open(file_name) as f:\n file = f.readlines()\n\n # 
separate annotation from data \n head_var = file[0]\n head_var = head_var.split('\\t')\n # Then, extract the sample names\n sample_names = []\n data_raw = []\n for l in file[1:]:\n l = l.split('\\t')\n sample_names.append(l[0])\n data_raw.append(l[1:])\n\n # clear memory of useless variables \n del file\n \n ##########################################\n # now, removing empty columns\n empties = []\n partial = []\n full = []\n for index in range(1, len(data_raw[0])):\n column = [element[index] for element in data_raw]\n if len(list(set(column))) == 1:\n empties.append(index)\n elif len(list(set(column))) <= min_coverage:\n partial.append(index)\n else:\n full.append(index)\n \n ##########################################\n intermed_matrix = []\n name_windows_covered = []\n # let's remove the compltetly uninformative columns\n for index in range(1, len(head_var[1:])):\n if index in full:\n intermed_matrix.append([element[index] for element in data_raw])\n name_windows_covered.append(head_var[index])\n\n ########################################\n # imputing values.\n imputed_matrix = []\n for row in intermed_matrix:\n imputed_row = []\n if \"nan\" in row:\n mean = np.mean([float(e) for e in row if e != \"nan\"])\n for element in row:\n if element == \"nan\":\n imputed_row.append(str(mean))\n else: \n imputed_row.append(element)\n imputed_matrix.append(imputed_row)\n else:\n imputed_matrix.append(row)\n\n imputed_matrix = np.matrix(imputed_matrix).transpose()\n return(ad.AnnData(imputed_matrix, obs=pd.DataFrame(index=sample_names), var=pd.DataFrame(index=name_windows_covered)))\n #return(imputed_matrix, sample_names, name_windows_covered)\n" ]
[ [ "numpy.matrix", "pandas.DataFrame", "numpy.nansum", "numpy.isnan", "numpy.array", "numpy.nan_to_num" ] ]
yourwanghao/Ultrasound_Nerve_Segmentation
[ "9a73cdb9a97b27c375a1023f4426d7e5a89b6a4d" ]
[ "training_curves.py" ]
[ "#!/usr/bin/env python\n\n## based on https://github.com/dmlc/mxnet/issues/1302\n## Parses the model fit log file and generates a train/val vs epoch plot\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport re\nimport argparse\n\ndef log_train_metric(period, auto_reset=False):\n \"\"\"Callback to log the training evaluation result every period.\n\n Parameters\n ----------\n period : int\n The number of batch to log the training evaluation metric.\n auto_reset : bool\n Reset the metric after each log\n\n Returns\n -------\n callback : function\n The callback function that can be passed as iter_epoch_callback to fit.\n \"\"\"\n def _callback(param):\n \"\"\"The checkpoint function.\"\"\"\n if param.nbatch % period == 0 and param.eval_metric is not None:\n name_value = param.eval_metric.get_name_value()\n for name, value in name_value:\n logging.info('Iter[%d] Batch[%d] Train-%s=%f',\n param.epoch, param.nbatch, name, value)\n if auto_reset:\n param.eval_metric.reset()\n return _callback\n\nparser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')\nparser.add_argument('--log-file', type=str,default=\"log_tr_va\",\n help='the path of log file')\nargs = parser.parse_args()\n\nprint('ok')\n\n\nTR_RE = re.compile('\\s+Train-dicecoef=([\\d\\.]+)')\nVA_RE = re.compile('.*?]\\sValidation-dicecoef=([\\d\\.]+)')\n\nlog = open(args.log_file).read()\n\nlog_tr = [float(x) for x in TR_RE.findall(log)]\nlog_va = [float(x) for x in VA_RE.findall(log)]\nidx = np.arange(len(log_tr))\n\nprint(len(log_tr), len(log_va))\n\n\nplt.figure(figsize=(8, 6))\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Accuracy\")\nplt.plot(idx, log_tr, 'o', linestyle='-', color=\"r\",\n label=\"Train dicecoef\")\n\nplt.plot(idx, log_va, 'o', linestyle='-', color=\"b\",\n label=\"Validation dicecoef\")\n\nplt.legend(loc=\"best\")\nplt.xticks(np.arange(min(idx), max(idx)+1, 5))\nplt.yticks(np.arange(0, 1, 0.2))\nplt.ylim([0,1])\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
rxbtz/tensorflow
[ "499f7ed810928e29986453c83778f71e2b351eb5" ]
[ "tensorflow/python/keras/_impl/keras/layers/embeddings.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Embedding layer.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import constraints\nfrom tensorflow.python.keras._impl.keras import initializers\nfrom tensorflow.python.keras._impl.keras import regularizers\nfrom tensorflow.python.keras._impl.keras.engine import Layer\nfrom tensorflow.python.keras._impl.keras.utils import tf_utils\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('keras.layers.Embedding')\nclass Embedding(Layer):\n \"\"\"Turns positive integers (indexes) into dense vectors of fixed size.\n\n eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n\n This layer can only be used as the first layer in a model.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Embedding(1000, 64, input_length=10))\n # the model will take as input an integer matrix of size (batch,\n input_length).\n # the largest integer (i.e. 
word index) in the input should be no larger\n than 999 (vocabulary size).\n # now model.output_shape == (None, 10, 64), where None is the batch\n dimension.\n\n input_array = np.random.randint(1000, size=(32, 10))\n\n model.compile('rmsprop', 'mse')\n output_array = model.predict(input_array)\n assert output_array.shape == (32, 10, 64)\n ```\n\n Arguments:\n input_dim: int > 0. Size of the vocabulary,\n i.e. maximum integer index + 1.\n output_dim: int >= 0. Dimension of the dense embedding.\n embeddings_initializer: Initializer for the `embeddings` matrix.\n embeddings_regularizer: Regularizer function applied to\n the `embeddings` matrix.\n embeddings_constraint: Constraint function applied to\n the `embeddings` matrix.\n mask_zero: Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n This is useful when using recurrent layers\n which may take variable length input.\n If this is `True` then all subsequent layers\n in the model need to support masking or an exception will be raised.\n If mask_zero is set to True, as a consequence, index 0 cannot be\n used in the vocabulary (input_dim should equal size of\n vocabulary + 1).\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n\n Input shape:\n 2D tensor with shape: `(batch_size, sequence_length)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, sequence_length, output_dim)`.\n\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n embeddings_initializer='uniform',\n embeddings_regularizer=None,\n activity_regularizer=None,\n embeddings_constraint=None,\n mask_zero=False,\n input_length=None,\n **kwargs):\n if 'input_shape' not in kwargs:\n if input_length:\n kwargs['input_shape'] = (input_length,)\n else:\n kwargs['input_shape'] = (None,)\n dtype = kwargs.pop('dtype', K.floatx())\n 
super(Embedding, self).__init__(dtype=dtype, **kwargs)\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.embeddings_initializer = initializers.get(embeddings_initializer)\n self.embeddings_regularizer = regularizers.get(embeddings_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.embeddings_constraint = constraints.get(embeddings_constraint)\n self.mask_zero = mask_zero\n self.input_length = input_length\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n self.embeddings = self.add_weight(\n shape=(self.input_dim, self.output_dim),\n initializer=self.embeddings_initializer,\n name='embeddings',\n regularizer=self.embeddings_regularizer,\n constraint=self.embeddings_constraint)\n self.built = True\n\n def compute_mask(self, inputs, mask=None):\n if not self.mask_zero:\n return None\n else:\n return math_ops.not_equal(inputs, 0)\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self.input_length is None:\n return input_shape + (self.output_dim,)\n else:\n # input_length can be tuple if input is 3D or higher\n if isinstance(self.input_length, (list, tuple)):\n in_lens = list(self.input_length)\n else:\n in_lens = [self.input_length]\n if len(in_lens) != len(input_shape) - 1:\n ValueError('\"input_length\" is %s, but received input has shape %s' %\n (str(self.input_length), str(input_shape)))\n else:\n for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n if s1 is not None and s2 is not None and s1 != s2:\n ValueError('\"input_length\" is %s, but received input has shape %s' %\n (str(self.input_length), str(input_shape)))\n elif s1 is None:\n in_lens[i] = s2\n return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n\n def call(self, inputs):\n dtype = K.dtype(inputs)\n if dtype != 'int32' and dtype != 'int64':\n inputs = math_ops.cast(inputs, 'int32')\n out = embedding_ops.embedding_lookup(self.embeddings, inputs)\n return out\n\n def 
get_config(self):\n config = {\n 'input_dim':\n self.input_dim,\n 'output_dim':\n self.output_dim,\n 'embeddings_initializer':\n initializers.serialize(self.embeddings_initializer),\n 'embeddings_regularizer':\n regularizers.serialize(self.embeddings_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'embeddings_constraint':\n constraints.serialize(self.embeddings_constraint),\n 'mask_zero':\n self.mask_zero,\n 'input_length':\n self.input_length\n }\n base_config = super(Embedding, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n" ]
[ [ "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.math_ops.not_equal", "tensorflow.python.keras._impl.keras.initializers.serialize", "tensorflow.python.ops.embedding_ops.embedding_lookup", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.keras._impl.keras.constraints.serialize", "tensorflow.python.keras._impl.keras.regularizers.serialize", "tensorflow.python.keras._impl.keras.backend.dtype", "tensorflow.python.keras._impl.keras.initializers.get", "tensorflow.python.keras._impl.keras.regularizers.get", "tensorflow.python.keras._impl.keras.backend.floatx", "tensorflow.python.keras._impl.keras.constraints.get" ] ]
martinRenou/chaco
[ "1888da3ecee89f9b2d11900cda9333b32fc5e89a" ]
[ "chaco/tests/test_grid_data_source.py" ]
[ "\"\"\"\nTests of GridDataSource behavior.\n\"\"\"\n\nimport unittest\n\nfrom numpy import array\nfrom numpy.testing import assert_array_equal\n\nfrom chaco.api import GridDataSource\nfrom traits.testing.unittest_tools import UnittestTools\n\n\nclass GridDataSourceTestCase(UnittestTools, unittest.TestCase):\n\n def setUp(self):\n self.data_source = GridDataSource(\n xdata=array([1, 2, 3]),\n ydata=array([1.5, 0.5, -0.5, -1.5]),\n sort_order=('ascending', 'descending'))\n\n def test_empty(self):\n data_source = GridDataSource()\n self.assertEqual(data_source.sort_order, ('none', 'none'))\n self.assertEqual(data_source.index_dimension, 'image')\n self.assertEqual(data_source.value_dimension, 'scalar')\n self.assertEqual(data_source.metadata,\n {\"selections\":[], \"annotations\":[]})\n xdata, ydata = data_source.get_data()\n assert_array_equal(xdata.get_data(), array([]))\n assert_array_equal(ydata.get_data(), array([]))\n self.assertEqual(data_source.get_bounds(), ((0,0),(0,0)))\n\n def test_init(self):\n test_xd = array([1, 2, 3])\n test_yd = array([1.5, 0.5, -0.5, -1.5])\n test_sort_order = ('ascending', 'descending')\n\n self.assertEqual(self.data_source.sort_order, test_sort_order)\n xd, yd = self.data_source.get_data()\n assert_array_equal(xd.get_data(), test_xd)\n assert_array_equal(yd.get_data(), test_yd)\n self.assertEqual(self.data_source.get_bounds(),\n ((min(test_xd),min(test_yd)),\n (max(test_xd),max(test_yd))))\n\n def test_set_data(self):\n\n test_xd = array([0,2,4])\n test_yd = array([0,1,2,3,4,5])\n test_sort_order = ('none', 'none')\n\n self.data_source.set_data(xdata=test_xd, ydata=test_yd,\n sort_order=('none', 'none'))\n\n self.assertEqual(self.data_source.sort_order, test_sort_order)\n xd, yd = self.data_source.get_data()\n assert_array_equal(xd.get_data(), test_xd)\n assert_array_equal(yd.get_data(), test_yd)\n self.assertEqual(self.data_source.get_bounds(),\n ((min(test_xd),min(test_yd)),\n (max(test_xd),max(test_yd))))\n\n def 
test_metadata(self):\n self.assertEqual(self.data_source.metadata,\n {'annotations': [], 'selections': []})\n\n def test_metadata_changed(self):\n with self.assertTraitChanges(self.data_source, 'metadata_changed', count=1):\n self.data_source.metadata = {'new_metadata': True}\n\n def test_metadata_items_changed(self):\n with self.assertTraitChanges(self.data_source, 'metadata_changed', count=1):\n self.data_source.metadata['new_metadata'] = True\n" ]
[ [ "numpy.array" ] ]
dewyman/TALON-paper-2019
[ "8644b34573d6a5924e8d84a234fd0fcbf010c233" ]
[ "pipeline/table_figure_scripts/rename_abundance_file_datasets.py" ]
[ "import pandas as pd \nfrom collections import defaultdict\nimport argparse\n\nparser = argparse.ArgumentParser(description=\\\n\t'Renames PacBio and ONT datasets with more\\\n\t intelligent names')\nparser.add_argument('--f', help='file to swap dataset col names in')\nargs = parser.parse_args()\nf = args.f\n\n# read in mapping file\nmap_df = pd.read_csv('dataset_id_name_map.tsv', sep='\\t')\nmap_df.set_index('dataset_id', inplace=True)\nmap_dict = map_df.to_dict()\n\ndf = pd.read_csv(f, sep='\\t')\ndf.rename(columns=map_dict['dataset_name'], inplace=True)\ndf.to_csv(f, sep='\\t', index=False)\n\n\n" ]
[ [ "pandas.read_csv" ] ]
gmwang18/pyscf
[ "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac" ]
[ "scf/_vhf.py" ]
[ "#!/usr/bin/env python\n\nimport sys\nimport ctypes\nimport _ctypes\nimport numpy\nimport pyscf.lib\nfrom pyscf import gto\nfrom pyscf.gto.moleintor import make_cintopt\n\nlibcvhf = pyscf.lib.load_library('libcvhf')\ndef _fpointer(name):\n return ctypes.c_void_p(_ctypes.dlsym(libcvhf._handle, name))\n\nclass VHFOpt(object):\n def __init__(self, mol, intor,\n prescreen='CVHFnoscreen', qcondname=None, dmcondname=None):\n self._this = ctypes.POINTER(_CVHFOpt)()\n #print self._this.contents, expect ValueError: NULL pointer access\n self._intor = _fpointer(intor)\n self._cintopt = pyscf.lib.c_null_ptr()\n self._dmcondname = dmcondname\n self.init_cvhf_direct(mol, intor, prescreen, qcondname)\n\n def init_cvhf_direct(self, mol, intor, prescreen, qcondname):\n c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n self._cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n\n# libcvhf.CVHFnr_optimizer(ctypes.byref(self._this),\n# c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n# c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n# c_env.ctypes.data_as(ctypes.c_void_p))\n libcvhf.CVHFinit_optimizer(ctypes.byref(self._this),\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n self._this.contents.fprescreen = _fpointer(prescreen)\n\n if prescreen != 'CVHFnoscreen':\n fsetqcond = getattr(libcvhf, qcondname)\n fsetqcond(self._this,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n @property\n def direct_scf_tol(self):\n return self._this.contents.direct_scf_cutoff\n @direct_scf_tol.setter\n def direct_scf_tol(self, v):\n self._this.contents.direct_scf_cutoff = v\n\n def set_dm(self, dm, atm, bas, 
env):\n if self._dmcondname is not None:\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n if isinstance(dm, numpy.ndarray) and dm.ndim == 2:\n n_dm = 1\n else:\n n_dm = len(dm)\n dm = numpy.asarray(dm, order='C')\n fsetdm = getattr(libcvhf, self._dmcondname)\n fsetdm(self._this,\n dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\nclass _CVHFOpt(ctypes.Structure):\n _fields_ = [('nbas', ctypes.c_int),\n ('_padding', ctypes.c_int),\n ('direct_scf_cutoff', ctypes.c_double),\n ('q_cond', ctypes.c_void_p),\n ('dm_cond', ctypes.c_void_p),\n ('fprescreen', ctypes.c_void_p),\n ('r_vkscreen', ctypes.c_void_p)]\n\n################################################\n# for general DM\n# hermi = 0 : arbitary\n# hermi = 1 : hermitian\n# hermi = 2 : anti-hermitian\n################################################\ndef incore(eri, dm, hermi=0):\n assert(not numpy.iscomplexobj(eri))\n eri = numpy.ascontiguousarray(eri)\n dm = numpy.ascontiguousarray(dm)\n nao = dm.shape[0]\n vj = numpy.empty((nao,nao))\n vk = numpy.empty((nao,nao))\n npair = nao*(nao+1)//2\n if eri.ndim == 2 and npair*npair == eri.size: # 4-fold symmetry eri\n fdrv = getattr(libcvhf, 'CVHFnrs4_incore_drv')\n # 'ijkl,kl->ij'\n fvj = _fpointer('CVHFics4_kl_s2ij')\n # 'ijkl,il->jk'\n fvk = _fpointer('CVHFics4_il_s1jk')\n # or\n ## 'ijkl,ij->kl'\n #fvj = _fpointer('CVHFics4_ij_s2kl')\n ## 'ijkl,jk->il'\n #fvk = _fpointer('CVHFics4_jk_s1il')\n\n tridm = dm\n elif eri.ndim == 1 and npair*(npair+1)//2 == eri.size: # 8-fold symmetry eri\n fdrv = getattr(libcvhf, 'CVHFnrs8_incore_drv')\n fvj = _fpointer('CVHFics8_tridm_vj')\n if hermi == 1:\n fvk = _fpointer('CVHFics8_jk_s2il')\n 
else:\n fvk = _fpointer('CVHFics8_jk_s1il')\n tridm = pyscf.lib.pack_tril(pyscf.lib.transpose_sum(dm))\n i = numpy.arange(nao)\n tridm[i*(i+1)//2+i] *= .5\n else:\n raise RuntimeError('Array shape not consistent: DM %s, eri %s'\n % (dm.shape, eri.shape))\n fdrv(eri.ctypes.data_as(ctypes.c_void_p),\n tridm.ctypes.data_as(ctypes.c_void_p),\n vj.ctypes.data_as(ctypes.c_void_p),\n dm.ctypes.data_as(ctypes.c_void_p),\n vk.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(nao), fvj, fvk)\n if hermi != 0:\n vj = pyscf.lib.hermi_triu(vj, hermi)\n vk = pyscf.lib.hermi_triu(vk, hermi)\n else:\n vj = pyscf.lib.hermi_triu(vj, 1)\n return vj, vk\n\n# use cint2e_sph as cintor, CVHFnrs8_ij_s2kl, CVHFnrs8_jk_s2il as fjk to call\n# direct_mapdm\ndef direct(dms, atm, bas, env, vhfopt=None, hermi=0):\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C'),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = numpy.asarray(dms, order='C')\n\n if vhfopt is None:\n cintor = _fpointer('cint2e_sph')\n cintopt = make_cintopt(c_atm, c_bas, c_env, 'cint2e_sph')\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')\n fdot = _fpointer('CVHFdot_nrs8')\n fvj = _fpointer('CVHFnrs8_ji_s2kl')\n if hermi == 1:\n fvk = _fpointer('CVHFnrs8_li_s2kj')\n else:\n fvk = _fpointer('CVHFnrs8_li_s1kj')\n vjk = numpy.empty((2,n_dm,nao,nao))\n fjk = (ctypes.c_void_p*(2*n_dm))()\n dmsptr = (ctypes.c_void_p*(2*n_dm))()\n vjkptr = (ctypes.c_void_p*(2*n_dm))()\n for i in range(n_dm):\n dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n 
vjkptr[i] = vjk[0,i].ctypes.data_as(ctypes.c_void_p)\n fjk[i] = fvj\n for i in range(n_dm):\n dmsptr[n_dm+i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n vjkptr[n_dm+i] = vjk[1,i].ctypes.data_as(ctypes.c_void_p)\n fjk[n_dm+i] = fvk\n shls_slice = (ctypes.c_int*8)(*([0, c_bas.shape[0]]*4))\n ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)\n\n fdrv(cintor, fdot, fjk, dmsptr, vjkptr,\n ctypes.c_int(n_dm*2), ctypes.c_int(1),\n shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n # vj must be symmetric\n for idm in range(n_dm):\n vjk[0,idm] = pyscf.lib.hermi_triu(vjk[0,idm], 1)\n if hermi != 0: # vk depends\n for idm in range(n_dm):\n vjk[1,idm] = pyscf.lib.hermi_triu(vjk[1,idm], hermi)\n if n_dm == 1:\n vjk = vjk.reshape(2,nao,nao)\n return vjk\n\n# call all fjk for each dm, the return array has len(dms)*len(jkdescript)*ncomp components\n# jkdescript: 'ij->s1kl', 'kl->s2ij', ...\ndef direct_mapdm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C'),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = [numpy.asarray(dm, order='C') for dm in dms]\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = len(jkdescript)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n 
vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_nr'+dotsym)\n\n if shls_slice is None:\n shls_slice = (0, c_bas.shape[0])*4\n ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)\n\n vjk = []\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(njk*n_dm))()\n dmsptr = (ctypes.c_void_p*(njk*n_dm))()\n vjkptr = (ctypes.c_void_p*(njk*n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n if dmsym in ('ij', 'kl', 'il', 'kj'):\n sys.stderr.write('not support DM description %s, transpose to %s\\n' %\n (dmsym, dmsym[::-1]))\n dmsym = dmsym[::-1]\n f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))\n\n vshape = (n_dm,ncomp) + get_dims(vsym[-2:], shls_slice, ao_loc)\n vjk.append(numpy.empty(vshape))\n for j in range(n_dm):\n assert(dms[j].shape == get_dims(dmsym, shls_slice, ao_loc))\n dmsptr[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)\n vjkptr[i*n_dm+j] = vjk[i][j].ctypes.data_as(ctypes.c_void_p)\n fjk[i*n_dm+j] = f1\n shls_slice = (ctypes.c_int*8)(*shls_slice)\n\n fdrv(cintor, fdot, fjk, dmsptr, vjkptr,\n ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),\n shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if n_dm * ncomp == 1:\n vjk = [v.reshape(v.shape[2:]) for v in vjk]\n elif n_dm == 1:\n vjk = [v.reshape((ncomp,)+v.shape[2:]) for v in vjk]\n elif ncomp == 1:\n vjk = [v.reshape((n_dm,)+v.shape[2:]) for v in vjk]\n if njk == 1:\n vjk = vjk[0]\n return vjk\n\n# for density matrices in dms, bind each dm to a jk operator\n# jkdescript: 'ij->s1kl', 'kl->s2ij', ...\ndef direct_bindm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 
's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C'),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = [numpy.asarray(dm, order='C') for dm in dms]\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = len(jkdescript)\n assert(njk == n_dm)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_nr'+dotsym)\n\n if shls_slice is None:\n shls_slice = (0, c_bas.shape[0])*4\n ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)\n\n vjk = []\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(n_dm))()\n dmsptr = (ctypes.c_void_p*(n_dm))()\n vjkptr = (ctypes.c_void_p*(n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n if dmsym in ('ij', 'kl', 'il', 'kj'):\n sys.stderr.write('not support DM description %s, transpose to %s\\n' %\n (dmsym, dmsym[::-1]))\n dmsym = dmsym[::-1]\n f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))\n\n assert(dms[i].shape == get_dims(dmsym, shls_slice, ao_loc))\n vshape = (ncomp,) + get_dims(vsym[-2:], shls_slice, ao_loc)\n vjk.append(numpy.empty(vshape))\n dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n vjkptr[i] = vjk[i].ctypes.data_as(ctypes.c_void_p)\n fjk[i] = f1\n shls_slice = (ctypes.c_int*8)(*shls_slice)\n\n fdrv(cintor, fdot, fjk, dmsptr, vjkptr,\n ctypes.c_int(n_dm), 
ctypes.c_int(ncomp),\n shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if ncomp == 1:\n vjk = [v.reshape(v.shape[1:]) for v in vjk]\n if njk == 1:\n vjk = vjk[0]\n return vjk\n\n\n# 8-fold permutation symmetry\ndef int2e_sph(atm, bas, env):\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n libcvhf.CINTtot_cgto_spheric.restype = ctypes.c_int\n nao = libcvhf.CINTtot_cgto_spheric(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)\n nao_pair = nao*(nao+1)//2\n eri = numpy.empty((nao_pair*(nao_pair+1)//2))\n libcvhf.int2e_sph(eri.ctypes.data_as(ctypes.c_void_p),\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n return eri\n\n\n################################################################\n# relativistic\ndef rdirect_mapdm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = len(jkdescript)\n\n if vhfopt is None:\n cintor = 
_fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_r'+dotsym)\n\n unpackas = _INTUNPACKMAP_R[aosym]\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(njk*n_dm))()\n dm1 = (ctypes.c_void_p*(njk*n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))\n for j in range(n_dm):\n dm1[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)\n fjk[i*n_dm+j] = f1\n vjk = numpy.empty((njk,n_dm*ncomp,nao,nao), dtype=numpy.complex)\n\n fdrv(cintor, fdot, fjk, dm1,\n vjk.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),\n cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if n_dm * ncomp == 1:\n vjk = vjk.reshape(njk,nao,nao)\n if njk == 1:\n vjk = vjk.reshape(vjk.shape[1:])\n return vjk\n\n# for density matrices in dms, bind each dm to a jk operator\ndef rdirect_bindm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = 
(jkdescript,)\n else:\n njk = len(jkdescript)\n assert(njk == n_dm)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_r'+dotsym)\n\n unpackas = _INTUNPACKMAP_R[aosym]\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(n_dm))()\n dm1 = (ctypes.c_void_p*(n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))\n dm1[i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n fjk[i] = f1\n vjk = numpy.empty((njk,ncomp,nao,nao), dtype=numpy.complex)\n\n fdrv(cintor, fdot, fjk, dm1,\n vjk.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(n_dm), ctypes.c_int(ncomp),\n cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if ncomp == 1:\n vjk = vjk.reshape(njk,nao,nao)\n if njk == 1:\n vjk = vjk.reshape(vjk.shape[1:])\n return vjk\n\n# 'a4ij': anti-symm between ij, symm between kl\n# 'a4kl': anti-symm between kl, symm between ij\n# 'a2ij': anti-symm between ij,\n# 'a2kl': anti-symm between kl,\n_INTSYMAP= {\n 's8' : 's8' ,\n 's4' : 's4' ,\n 's2ij': 's2ij',\n 's2kl': 's2kl',\n 's1' : 's1' ,\n 'a4ij': 's4' ,\n 'a4kl': 's4' ,\n 'a2ij': 's2ij',\n 'a2kl': 's2kl',\n}\n\n_INTUNPACKMAP_R = {\n 's8' : 's8' ,\n 's4' : 's4' ,\n 's2ij': 's2ij',\n 's2kl': 's2kl',\n 's1' : 's1' ,\n 'a4ij': 'ah4' ,\n 'a4kl': 'ha4' ,\n 'a2ij': 'ah2ij',\n 'a2kl': 'ha2kl',\n}\n\ndef make_ao_loc(bas, cart=False):\n l = bas[:,gto.ANG_OF]\n if cart:\n dims = (l+1)*(l+2)//2 * bas[:,gto.NCTR_OF]\n else:\n dims = (l*2+1) * bas[:,gto.NCTR_OF]\n ao_loc = numpy.empty(len(bas)+1, dtype=numpy.int32)\n ao_loc[0] = 0\n 
dims.cumsum(dtype=numpy.int32, out=ao_loc[1:])\n return ao_loc\n\n_SHLINDEX = {'i': 0, 'j': 2, 'k': 4, 'l': 6}\ndef get_dims(descr_sym, shls_slice, ao_loc):\n i = _SHLINDEX[descr_sym[0]]\n j = _SHLINDEX[descr_sym[1]]\n di = ao_loc[shls_slice[i+1]] - ao_loc[shls_slice[i]]\n dj = ao_loc[shls_slice[j+1]] - ao_loc[shls_slice[j]]\n return (di,dj)\n\n" ]
[ [ "numpy.empty", "numpy.asarray", "numpy.arange", "numpy.iscomplexobj", "numpy.ascontiguousarray" ] ]
mrzhuzhe/yunru
[ "faa7380a5363f654f1dc8f5d53b077d9f33bff6f" ]
[ "yolov5/models/common.py" ]
[ "# YOLOv5 common modules\n\nimport math\nfrom copy import copy\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torch.cuda import amp\n\nfrom utils.datasets import letterbox\nfrom utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box\nfrom utils.plots import colors, plot_one_box\nfrom utils.torch_utils import time_synchronized\n\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\n\ndef DWConv(c1, c2, k=1, s=1, act=True):\n # Depthwise convolution\n return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Conv, self).__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))\n\n\nclass TransformerLayer(nn.Module):\n # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)\n def __init__(self, c, num_heads):\n super().__init__()\n self.q = nn.Linear(c, c, bias=False)\n self.k = nn.Linear(c, c, bias=False)\n self.v = nn.Linear(c, c, bias=False)\n self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n self.fc1 = nn.Linear(c, c, bias=False)\n self.fc2 = nn.Linear(c, c, bias=False)\n\n def forward(self, x):\n x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n x = self.fc2(self.fc1(x)) + x\n return x\n\n\nclass TransformerBlock(nn.Module):\n # Vision Transformer 
https://arxiv.org/abs/2010.11929\n def __init__(self, c1, c2, num_heads, num_layers):\n super().__init__()\n self.conv = None\n if c1 != c2:\n self.conv = Conv(c1, c2)\n self.linear = nn.Linear(c2, c2) # learnable position embedding\n self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])\n self.c2 = c2\n\n def forward(self, x):\n if self.conv is not None:\n x = self.conv(x)\n b, _, w, h = x.shape\n p = x.flatten(2)\n p = p.unsqueeze(0)\n p = p.transpose(0, 3)\n p = p.squeeze(3)\n e = self.linear(p)\n x = p + e\n\n x = self.tr(x)\n x = x.unsqueeze(3)\n x = x.transpose(0, 3)\n x = x.reshape(b, self.c2, w, h)\n return x\n\n\nclass Bottleneck(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super(Bottleneck, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(BottleneckCSP, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass C3(nn.Module):\n # CSP Bottleneck with 3 convolutions\n def __init__(self, c1, c2, 
n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(C3, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\n\nclass C3TR(C3):\n # C3 module with TransformerBlock()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass SPP(nn.Module):\n # Spatial pyramid pooling layer used in YOLOv3-SPP\n def __init__(self, c1, c2, k=(5, 9, 13)):\n super(SPP, self).__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n def forward(self, x):\n x = self.cv1(x)\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass Focus(nn.Module):\n # Focus wh information into c-space\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Focus, self).__init__()\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n # self.contract = Contract(gain=2)\n\n def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))\n # return self.conv(self.contract(x))\n\n\nclass Contract(nn.Module):\n # Contract width-height into channels, i.e. 
x(1,64,80,80) to x(1,256,40,40)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'\n s = self.gain\n x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)\n return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'\n s = self.gain\n x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)\n return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)\n\n\nclass Concat(nn.Module):\n # Concatenate a list of tensors along dimension\n def __init__(self, dimension=1):\n super(Concat, self).__init__()\n self.d = dimension\n\n def forward(self, x):\n return torch.cat(x, self.d)\n\n\nclass NMS(nn.Module):\n # Non-Maximum Suppression (NMS) module\n conf = 0.25 # confidence threshold\n iou = 0.45 # IoU threshold\n classes = None # (optional list) filter by class\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self):\n super(NMS, self).__init__()\n\n def forward(self, x):\n return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)\n\n\nclass AutoShape(nn.Module):\n # input-robust model wrapper for passing cv2/np/PIL/torch inputs. 
Includes preprocessing, inference and NMS\n conf = 0.25 # NMS confidence threshold\n iou = 0.45 # NMS IoU threshold\n classes = None # (optional list) filter by class\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self, model):\n super(AutoShape, self).__init__()\n self.model = model.eval()\n\n def autoshape(self):\n print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()\n return self\n\n @torch.no_grad()\n def forward(self, imgs, size=640, augment=False, profile=False):\n # Inference from various sources. For height=640, width=1280, RGB images example inputs are:\n # filename: imgs = 'data/images/zidane.jpg'\n # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'\n # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)\n # PIL: = Image.open('image.jpg') # HWC x(640,1280,3)\n # numpy: = np.zeros((640,1280,3)) # HWC\n # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)\n # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images\n\n t = [time_synchronized()]\n p = next(self.model.parameters()) # for device and type\n if isinstance(imgs, torch.Tensor): # torch\n with amp.autocast(enabled=p.device.type != 'cpu'):\n return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference\n\n # Pre-process\n n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images\n shape0, shape1, files = [], [], [] # image and inference shapes, filenames\n for i, im in enumerate(imgs):\n f = f'image{i}' # filename\n if isinstance(im, str): # filename or uri\n im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im\n elif isinstance(im, Image.Image): # PIL Image\n im, f = np.asarray(im), getattr(im, 'filename', f) or f\n files.append(Path(f).with_suffix('.jpg').name)\n if im.shape[0] < 5: # image in CHW\n im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)\n im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input\n s = im.shape[:2] # HWC\n shape0.append(s) # image shape\n g = (size / max(s)) # gain\n shape1.append([y * g for y in s])\n imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update\n shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape\n x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad\n x = np.stack(x, 0) if n > 1 else x[0][None] # stack\n x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW\n x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32\n t.append(time_synchronized())\n\n with amp.autocast(enabled=p.device.type != 'cpu'):\n # Inference\n y = self.model(x, augment, profile)[0] # forward\n t.append(time_synchronized())\n\n # Post-process\n y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS\n for i in range(n):\n scale_coords(shape1, y[i][:, :4], shape0[i])\n\n t.append(time_synchronized())\n return Detections(imgs, y, files, t, self.names, x.shape)\n\n\nclass Detections:\n # detections class for YOLOv5 inference results\n def __init__(self, imgs, pred, files, times=None, names=None, shape=None):\n super(Detections, self).__init__()\n d = pred[0].device # device\n gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations\n self.imgs = imgs # list of images as numpy arrays\n self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)\n self.names = names # class names\n self.files = files # image filenames\n self.xyxy = pred # xyxy pixels\n self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels\n self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized\n self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized\n self.n = len(self.pred) # number of images (batch size)\n self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)\n self.s = shape # inference BCHW shape\n\n def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):\n for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):\n str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '\n if pred is not None:\n for c in pred[:, -1].unique():\n n = (pred[:, -1] == c).sum() # detections per class\n str += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \" # add to string\n if show or save or render or crop:\n for *box, conf, cls in pred: # xyxy, confidence, class\n label = f'{self.names[int(cls)]} 
{conf:.2f}'\n if crop:\n save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])\n else: # all others\n plot_one_box(box, im, label=label, color=colors(cls))\n\n im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np\n if pprint:\n print(str.rstrip(', '))\n if show:\n im.show(self.files[i]) # show\n if save:\n f = self.files[i]\n im.save(save_dir / f) # save\n print(f\"{'Saved' * (i == 0)} {f}\", end=',' if i < self.n - 1 else f' to {save_dir}\\n')\n if render:\n self.imgs[i] = np.asarray(im)\n\n def print(self):\n self.display(pprint=True) # print results\n print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)\n\n def show(self):\n self.display(show=True) # show results\n\n def save(self, save_dir='runs/hub/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir\n self.display(save=True, save_dir=save_dir) # save results\n\n def crop(self, save_dir='runs/hub/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir\n self.display(crop=True, save_dir=save_dir) # crop results\n print(f'Saved results to {save_dir}\\n')\n\n def render(self):\n self.display(render=True) # render results\n return self.imgs\n\n def pandas(self):\n # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])\n new = copy(self) # return copy\n ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns\n cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns\n for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):\n a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update\n setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n return new\n\n def tolist(self):\n # return a list of Detections objects, i.e. 
'for result in results.tolist():'\n x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]\n for d in x:\n for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:\n setattr(d, k, getattr(d, k)[0]) # pop out of list\n return x\n\n def __len__(self):\n return self.n\n\n\nclass Classify(nn.Module):\n # Classification head, i.e. x(b,c1,20,20) to x(b,c2)\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups\n super(Classify, self).__init__()\n self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)\n self.flat = nn.Flatten()\n\n def forward(self, x):\n z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list\n return self.flat(self.conv(z)) # flatten to x(b,c2)" ]
[ [ "torch.no_grad", "numpy.asarray", "torch.nn.Conv2d", "numpy.stack", "numpy.ascontiguousarray", "torch.cat", "torch.nn.BatchNorm2d", "torch.from_numpy", "torch.nn.MaxPool2d", "numpy.tile", "torch.nn.AdaptiveAvgPool2d", "torch.tensor", "torch.nn.MultiheadAttention", "torch.nn.Linear", "torch.nn.Flatten", "pandas.DataFrame", "torch.nn.SiLU", "torch.cuda.amp.autocast", "torch.nn.Identity", "torch.nn.LeakyReLU" ] ]
nadavyayon/cell2location
[ "54141fb85d4b0d64825dfdb6d1bf147b025c856b" ]
[ "cell2location/plt/plot_factor_spatial.py" ]
[ "#!pip install plotnine\nimport numpy as np\nimport pandas as pd\nimport plotnine\n\n\ndef plot_factor_spatial(\n adata,\n fact,\n cluster_names,\n fact_ind=[0],\n trans=\"log\",\n sample_name=None,\n samples_col=\"sample\",\n obs_x=\"imagecol\",\n obs_y=\"imagerow\",\n n_columns=6,\n max_col=5000,\n col_breaks=[0.1, 100, 1000, 3000],\n figure_size=(24, 5.7),\n point_size=0.8,\n text_size=9,\n):\n r\"\"\"Plot expression of factors / cell types in space.\n Convenient but not as powerful as scanpy plotting.\n :param adata: anndata object with spatial data\n :param fact: pd.DataFrame with spatial expression of factors (W), e.g. mod.spot_factors_df\n :param cluster_names: names of those factors to show on a plot\n :param fact_ind: index of factors to plot\n :param trans: transform colorscale? passed to plotnine.scale_color_cmap\n :param sample_name: if anndata object contains multiple samples specify which sample to plot (no warning given if not)\n :param samples_col: if anndata object contains multiple which .obs columns specifies sample?\n :param obs_x: which .obs columns specifies x coordinate?\n :param obs_y: which .obs columns specifies y coordinate?\n :param n_columns: how many factors / clusters to plot in each row (plotnine.facet_grid)\n :param max_col: colorscale maximum expression in fact\n :param col_breaks: colorscale breaks\n :param figure_size: figures size works weirdly (only x axis has an effect, use 24 for 6-column plot, 12 for 3, 8 for 2 ...).\n :param point_size: point size of spots\n :param text_size: text size\n \"\"\"\n\n if sample_name is not None:\n sample_ind = np.isin(adata.obs[samples_col], sample_name)\n else:\n sample_ind = np.repeat(True, adata.shape[0])\n\n # adata.obsm['X_spatial'][:,0] vs adata.obs['imagecol'] & adata.obs['imagerow']\n\n for_plot = np.concatenate(\n (\n adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),\n -adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),\n fact.iloc[:, 
fact_ind[0]].values.reshape((adata.obs.shape[0], 1)),\n np.array([cluster_names[fact_ind[0]] for j in range(adata.obs.shape[0])]).reshape((adata.obs.shape[0], 1)),\n ),\n 1,\n )\n for_plot = pd.DataFrame(for_plot, index=adata.obs.index, columns=[\"imagecol\", \"imagerow\", \"weights\", \"cluster\"])\n # select only correct sample\n for_plot = for_plot.loc[sample_ind, :]\n\n for i in fact_ind[1:]:\n for_plot1 = np.concatenate(\n (\n adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),\n -adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),\n fact.iloc[:, i].values.reshape((adata.obs.shape[0], 1)),\n np.array([cluster_names[i] for j in range(adata.obs.shape[0])]).reshape((adata.obs.shape[0], 1)),\n ),\n 1,\n )\n for_plot1 = pd.DataFrame(\n for_plot1, index=adata.obs.index, columns=[\"imagecol\", \"imagerow\", \"weights\", \"cluster\"]\n )\n # select only correct sample\n for_plot1 = for_plot1.loc[sample_ind, :]\n for_plot = pd.concat((for_plot, for_plot1))\n\n for_plot[\"imagecol\"] = pd.to_numeric(for_plot[\"imagecol\"])\n for_plot[\"imagerow\"] = pd.to_numeric(for_plot[\"imagerow\"])\n for_plot[\"weights\"] = pd.to_numeric(for_plot[\"weights\"])\n for_plot[\"cluster\"] = pd.Categorical(for_plot[\"cluster\"], categories=cluster_names[fact_ind], ordered=True)\n\n # print(np.log(np.max(for_plot['weights'])))\n ax = (\n plotnine.ggplot(for_plot, plotnine.aes(\"imagecol\", \"imagerow\", color=\"weights\"))\n + plotnine.geom_point(size=point_size)\n + plotnine.scale_color_cmap(\"magma\", trans=trans, limits=[0.1, max_col], breaks=col_breaks + [max_col])\n + plotnine.coord_fixed()\n + plotnine.theme_bw()\n + plotnine.theme(\n panel_background=plotnine.element_rect(fill=\"black\", colour=\"black\", size=0, linetype=\"solid\"),\n panel_grid_major=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n panel_grid_minor=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n strip_text=plotnine.element_text(size=text_size),\n )\n + 
plotnine.facet_wrap(\"~cluster\", ncol=n_columns)\n + plotnine.ggtitle(\"nUMI from each cell type\")\n + plotnine.theme(figure_size=figure_size)\n )\n\n return ax\n\n\ndef plot_categ_spatial(mod, adata, sample_col, color, n_columns=2, figure_size=(24, 5.7), point_size=0.8, text_size=9):\n\n for_plot = adata.obs[[\"imagecol\", \"imagerow\", sample_col]]\n for_plot[\"color\"] = color\n\n # fix types\n for_plot[\"color\"] = pd.Categorical(for_plot[\"color\"], ordered=True)\n # for_plot['color'] = pd.to_numeric(for_plot['color'])\n for_plot[\"sample\"] = pd.Categorical(for_plot[sample_col], ordered=False)\n for_plot[\"imagecol\"] = pd.to_numeric(for_plot[\"imagecol\"])\n for_plot[\"imagerow\"] = -pd.to_numeric(for_plot[\"imagerow\"])\n\n ax = (\n plotnine.ggplot(for_plot, plotnine.aes(x=\"imagecol\", y=\"imagerow\", color=\"color\"))\n + plotnine.geom_point(size=point_size) # + plotnine.scale_color_cmap()\n + plotnine.coord_fixed()\n + plotnine.theme_bw()\n + plotnine.theme(\n panel_background=plotnine.element_rect(fill=\"black\", colour=\"black\", size=0, linetype=\"solid\"),\n panel_grid_major=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n panel_grid_minor=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n strip_text=plotnine.element_text(size=text_size),\n )\n + plotnine.facet_wrap(\"~sample\", ncol=n_columns)\n + plotnine.theme(figure_size=figure_size)\n )\n\n return ax\n" ]
[ [ "pandas.to_numeric", "pandas.DataFrame", "numpy.repeat", "pandas.Categorical", "numpy.isin", "pandas.concat" ] ]
NCAR/GeoCAT-examples
[ "5ed9a1d68b69a921d0f1fee1160e109853926ed9" ]
[ "Plots/Scatter/NCL_scatter_5.py" ]
[ "\"\"\"\nNCL_scatter_5.py\n================\nThis script illustrates the following concepts:\n - Drawing a scatter plot with markers of different colors\n - Generating dummy data using \"random.normal\"\n - Manually creating a legend using markers and text\n - Customizing the label locations in a legend\n - Changing the orientation of a legend\n - Drawing a legend outside an XY plot\n - Changing the markers in an XY plot\n - Changing the marker color in an XY plot\n - Changing the marker size in an XY plot\n\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/scatter_5.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/scatter_5_lg.png\n\"\"\"\n\n##############################################################################\n# Import packages:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cycler import cycler\n\nfrom geocat.viz import util as gvutil\n\n##############################################################################\n# Generate random data from a normal (Gaussian) distribution with a mean of 10 and standard deviation of 3\nnpts = 300\nrandom = np.random.default_rng(seed=1)\ndata = random.normal(loc=10, scale=3, size=npts)\n\n##############################################################################\n# Specify colors and markers\ncolors = [\n 'darkgoldenrod', 'darkgreen', 'coral', 'cyan', 'firebrick', 'darkslateblue',\n 'limegreen', 'goldenrod'\n]\nmarkers = ['+', '*', 'o', 'x', 's', '^', 'v', 'D']\n\n# This line cycles which color is used to plot the markers\nplt.rcParams['axes.prop_cycle'] = cycler(color=colors)\n\n##############################################################################\n# Plot\nfig = plt.figure(figsize=(8, 8))\n# Adjust the axes size to accommodate the legend at the bottom\nax = plt.axes([0.15, 0.2, 0.75, 0.70])\n\n# Divide data into 8 bins and plot\nnumBins = 8\nindices = np.arange(0, 300)\npartitions = 
np.linspace(0, 20, numBins + 1)\nlabel = \"{start:g}:{end:g}\"\nfor x in range(0, numBins):\n bins = np.where(data > partitions[x], data, np.nan)\n with np.errstate(\n invalid='ignore'\n ): # Indeed not needed, just to get rid of warnings about numpy's NaN comparisons\n bins = np.where(bins < partitions[x + 1], bins, np.nan)\n indices = np.where(bins != np.nan, indices, np.nan)\n plt.plot(indices,\n bins,\n marker=markers[x],\n fillstyle='none',\n linewidth=0,\n label=label.format(start=partitions[x], end=partitions[x + 1]))\n\n# `ncol` being equal to the number of labels makes it appear horizontal\nlegend = ax.legend(bbox_to_anchor=(-0.075, -0.2),\n ncol=numBins,\n loc='lower left',\n columnspacing=0.5,\n frameon=False)\nfor txt in legend.get_texts():\n txt.set_ha(\"center\") # horizontal alignment of text item\n txt.set_va(\"center\") # vertical alignment of text item\n # Move label text so it is centered under the marker\n txt.set_x(-25) # x-position\n txt.set_y(-20) # y-position\n\n# Use geocat.viz.util convenience function to set axes parameters\ngvutil.set_axes_limits_and_ticks(ax,\n xlim=(0, 300),\n ylim=(0, 21),\n xticks=range(0, 301, 50),\n yticks=range(0, 22, 3))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax,\n x_minor_per_major=5,\n y_minor_per_major=3,\n labelsize=14)\n\n# Use geocat.viz.util convenience function to set titles and labels\ngvutil.set_titles_and_labels(ax, maintitle=\"Scatter plot with grouped markers\")\n\nplt.show()\n" ]
[ [ "numpy.random.default_rng", "matplotlib.pyplot.figure", "matplotlib.pyplot.axes", "numpy.arange", "numpy.errstate", "matplotlib.pyplot.show", "numpy.where", "numpy.linspace" ] ]
Nick-AhSen/iGibson
[ "c6854f11eec5d935fa3ef3d6d4852c6571beab4b" ]
[ "igibson/examples/demo/generate_data_semseg_lidar.py" ]
[ "import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport igibson\nfrom igibson.envs.igibson_env import iGibsonEnv\n\n\ndef get_lidar_sampling_pattern():\n lidar_vertical_low = -15 / 180.0 * np.pi\n lidar_vertical_high = 15 / 180.0 * np.pi\n lidar_vertical_n_beams = 16\n lidar_vertical_beams = np.arange(\n lidar_vertical_low,\n lidar_vertical_high + (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),\n (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),\n )\n\n lidar_horizontal_low = -45 / 180.0 * np.pi\n lidar_horizontal_high = 45 / 180.0 * np.pi\n lidar_horizontal_n_beams = 468\n lidar_horizontal_beams = np.arange(\n lidar_horizontal_low,\n lidar_horizontal_high,\n (lidar_horizontal_high - lidar_horizontal_low) / (lidar_horizontal_n_beams),\n )\n\n xx, yy = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams)\n xx = xx.flatten()\n yy = yy.flatten()\n\n height = 128\n\n x_samples = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(np.int)\n y_samples = (np.tan(yy) * height // 2 + height // 2).astype(np.int)\n\n x_samples = x_samples.flatten()\n y_samples = y_samples.flatten()\n return x_samples, y_samples\n\n\nx_samples, y_samples = get_lidar_sampling_pattern()\n\n\ndef generate_data_lidar(nav_env, num_samples=3):\n\n rgb_all = []\n lidar_all = []\n lidar_all_2 = []\n label_all = []\n\n point = nav_env.scene.get_random_point()[1]\n\n for _ in range(num_samples):\n new_point = nav_env.scene.get_random_point()[1]\n\n while np.linalg.norm(new_point - point) > 1:\n new_point = nav_env.scene.get_random_point()[1]\n\n delta_pos = new_point - point\n delta_pos = np.array([delta_pos[1], delta_pos[2], delta_pos[0]])\n # print(delta_pos)\n nav_env.robots[0].set_position(new_point)\n pano_rgb = nav_env.simulator.renderer.get_cube(mode=\"rgb\", use_robot_camera=True)\n pano_3d = nav_env.simulator.renderer.get_cube(mode=\"3d\", use_robot_camera=True)\n 
pano_seg = nav_env.simulator.renderer.get_cube(mode=\"seg\", use_robot_camera=True)\n\n r3 = np.array(\n [[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]]\n )\n transformatiom_matrix = np.eye(3)\n\n for i in range(4):\n lidar_all.append(pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] - delta_pos[None, :])\n rgb_all.append(pano_rgb[i][:, :, :3][x_samples, y_samples])\n label_all.append(pano_seg[i][:, :, 0][x_samples, y_samples] * 255.0)\n lidar_all_2.append(\n pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] * 0.9 - delta_pos[None, :]\n )\n transformatiom_matrix = r3.dot(transformatiom_matrix)\n\n lidar_all = np.concatenate(lidar_all, 0).astype(np.float32)\n lidar_all_2 = np.concatenate(lidar_all_2, 0).astype(np.float32)\n rgb_all = np.concatenate(rgb_all, 0).astype(np.float32)\n label_all = np.concatenate(label_all, 0).astype(np.int32)\n\n assert len(label_all) == len(label_all)\n\n direction = lidar_all - lidar_all_2\n direction = direction / (np.linalg.norm(direction, axis=1)[:, None] + 1e-5)\n\n print(lidar_all.shape, direction.shape, rgb_all.shape, label_all.shape)\n return lidar_all, direction, rgb_all, label_all\n\n\ndef generate_data_from_scene(scene_id):\n\n mode = \"headless\"\n config = os.path.join(igibson.example_path, \"configs/fetch_room_rearrangement.yaml\")\n nav_env = iGibsonEnv(\n config_file=config, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0\n )\n # data = []\n # for i in tqdm(range(5)):\n # data.append(generate_data_lidar(nav_env))\n\n # lidar_all = [item[0] for item in data]\n # direction = [item[1] for item in data]\n # rgb_all = [item[2] for item in data]\n # label_all = [item[3] for item in data]\n\n pts, direction, color, label = generate_data_lidar(nav_env)\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3])\n plt.show()\n\n # 
np.savez('/data2/point_cloud/data10_v2_{}.npz'.format(scene_id), lidar=lidar_all, direction=direction, rgb=rgb_all, label=label_all)\n\n\nif __name__ == \"__main__\":\n generate_data_from_scene(\"Rs_int\")\n\n# scenes = []\n# with open('scene_list', 'r') as f:\n# for line in f:\n# scenes.append(line.strip())\n\n# p = Pool(2)\n# p.map(generate_data_from_scene, scenes)\n" ]
[ [ "numpy.eye", "matplotlib.pyplot.figure", "numpy.cos", "numpy.arange", "matplotlib.pyplot.show", "numpy.tan", "numpy.array", "numpy.sin", "numpy.concatenate", "numpy.meshgrid", "numpy.linalg.norm" ] ]
Spain-AI/dark_helper
[ "c2a5d774b455b2a374d6ca5e2715f7a560f5fe5b" ]
[ "face_lib/sort.py" ]
[ "\"\"\"\n SORT: A Simple, Online and Realtime Tracker\n Copyright (C) 2016-2020 Alex Bewley [email protected]\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nfrom filterpy.kalman import KalmanFilter\n\ntry:\n from numba import jit\nexcept:\n def jit(func):\n return func\n\nnp.random.seed(0)\n\n\ndef linear_assignment(cost_matrix):\n try:\n import lap\n _, x, y = lap.lapjv(cost_matrix, extend_cost=True)\n return np.array([[y[i],i] for i in x if i >= 0]) #\n except ImportError:\n from scipy.optimize import linear_sum_assignment\n x, y = linear_sum_assignment(cost_matrix)\n return np.array(list(zip(x, y)))\n\n\n@jit\ndef iou(bb_test, bb_gt):\n \"\"\"\n Computes IUO between two bboxes in the form [x1,y1,x2,y2]\n \"\"\"\n xx1 = np.maximum(bb_test[0], bb_gt[0])\n yy1 = np.maximum(bb_test[1], bb_gt[1])\n xx2 = np.minimum(bb_test[2], bb_gt[2])\n yy2 = np.minimum(bb_test[3], bb_gt[3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])\n + (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)\n return(o)\n\n\ndef convert_bbox_to_z(bbox):\n \"\"\"\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n \"\"\"\n w 
= bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w/2.\n y = bbox[1] + h/2.\n s = w * h #scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))\n\n\ndef convert_x_to_bbox(x,score=None):\n \"\"\"\n Takes a bounding box in the centre form [x,y,s,r] and returns it in the form\n [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right\n \"\"\"\n w = np.sqrt(x[2] * x[3])\n h = x[2] / w\n if(score==None):\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))\n else:\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))\n\n\nclass KalmanBoxTracker(object):\n \"\"\"\n This class represents the internal state of individual tracked objects observed as bbox.\n \"\"\"\n count = 0\n def __init__(self,bbox,emb):\n \"\"\"\n Initialises a tracker using initial bounding box.\n \"\"\"\n #define constant velocity model\n self.kf = KalmanFilter(dim_x=7, dim_z=4) \n self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0], [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])\n self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])\n\n self.kf.R[2:,2:] *= 10.\n self.kf.P[4:,4:] *= 1000. 
#give high uncertainty to the unobservable initial velocities\n self.kf.P *= 10.\n self.kf.Q[-1,-1] *= 0.01\n self.kf.Q[4:,4:] *= 0.01\n\n self.kf.x[:4] = convert_bbox_to_z(bbox)\n self.time_since_update = 0\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n self.emb = emb\n\n def update(self, bbox, emb):\n \"\"\"\n Updates the state vector with observed bbox.\n \"\"\"\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n self.kf.update(convert_bbox_to_z(bbox))\n\n self.emb = 0.2 * emb + 0.8 * self.emb\n\n def predict(self):\n \"\"\"\n Advances the state vector and returns the predicted bounding box estimate.\n \"\"\"\n if((self.kf.x[6]+self.kf.x[2])<=0):\n self.kf.x[6] *= 0.0\n self.kf.predict()\n self.age += 1\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n return self.history[-1]\n\n def get_state(self):\n \"\"\"\n Returns the current bounding box estimate.\n \"\"\"\n return convert_x_to_bbox(self.kf.x)\n\n\ndef associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):\n \"\"\"\n Assigns detections to tracked object (both represented as bounding boxes)\n\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\n \"\"\"\n if(len(trackers)==0):\n return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)\n iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)\n\n for d,det in enumerate(detections):\n for t,trk in enumerate(trackers):\n iou_matrix[d,t] = iou(det, trk)\n\n if min(iou_matrix.shape) > 0:\n a = (iou_matrix > iou_threshold).astype(np.int32)\n if a.sum(1).max() == 1 and a.sum(0).max() == 1:\n matched_indices = np.stack(np.where(a), axis=1)\n else:\n matched_indices = linear_assignment(-iou_matrix)\n else:\n matched_indices = np.empty(shape=(0,2))\n\n unmatched_detections = []\n for d, det in enumerate(detections):\n 
if(d not in matched_indices[:,0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t, trk in enumerate(trackers):\n if(t not in matched_indices[:,1]):\n unmatched_trackers.append(t)\n\n #filter out matched with low IOU\n matches = []\n for m in matched_indices:\n if(iou_matrix[m[0], m[1]]<iou_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1,2))\n if(len(matches)==0):\n matches = np.empty((0,2),dtype=int)\n else:\n matches = np.concatenate(matches,axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\n\nclass Sort(object):\n def __init__(self, max_age=15, min_hits=3):\n \"\"\"\n Sets key parameters for SORT\n \"\"\"\n self.max_age = max_age\n self.min_hits = min_hits\n self.trackers = []\n self.frame_count = 0\n\n def update(self, dets=np.empty((0, 4)), embs=None):\n \"\"\"\n Params:\n dets - a numpy array of detections in the format [[x1,y1,x2,y2],[x1,y1,x2,y2],...]\n Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).\n Returns the a similar array, where the last column is the object ID.\n\n NOTE: The number of objects returned may differ from the number of detections provided.\n \"\"\"\n self.frame_count += 1\n # get predicted locations from existing trackers.\n trks = np.zeros((len(self.trackers), 4))\n to_del = []\n ret = []\n for t, trk in enumerate(trks):\n pos = self.trackers[t].predict()[0]\n trk[:] = [pos[0], pos[1], pos[2], pos[3]]\n if np.any(np.isnan(pos)):\n to_del.append(t)\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\n for t in reversed(to_del):\n self.trackers.pop(t)\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)\n\n # update matched trackers with assigned detections\n for m in matched:\n self.trackers[m[1]].update(dets[m[0], :], embs[m[0]])\n\n # create and initialise new trackers for 
unmatched detections\n for i in unmatched_dets:\n trk = KalmanBoxTracker(dets[i,:], embs[i])\n self.trackers.append(trk)\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n d = trk.get_state()[0]\n if (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):\n ret.append((np.concatenate((d, [trk.id + 1])), trk.emb)) # +1 as MOT benchmark requires positive\n i -= 1\n # remove dead tracklet\n if(trk.time_since_update > self.max_age):\n self.trackers.pop(i)\n if(len(ret)>0):\n return ret\n return []" ]
[ [ "numpy.sqrt", "numpy.empty", "numpy.ma.masked_invalid", "numpy.random.seed", "scipy.optimize.linear_sum_assignment", "numpy.where", "numpy.isnan", "numpy.maximum", "numpy.concatenate", "numpy.array", "numpy.minimum" ] ]
ashishgaurav13/cl_safer_classifiers
[ "a3df87a4bc863377485fa58a8a475991a4fc9800" ]
[ "utils/data_iterators/cifar100.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.backend import clear_session\nfrom keras.utils import to_categorical\nimport tensorflow.keras as keras\nfrom .common import save_pickle, load_pickle\nfrom tqdm import tqdm\n\n# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => flatten\n# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => activation_42\n\nclass CIFAR100_DataIterator:\n\n def __init__(self, train_data, test_data, batch_size = 32, \n randomize = True, task_labels = None,\n embedding_save_file = 'utils/data_iterators/cifar100_embedding.pkl',\n embedding_model_file = 'utils/data_iterators/cifar100_ResNet44v1_model.171.h5',\n embedding_model_layer = 'activation_42'): # 'flatten'):\n\n assert(task_labels != None)\n self.train_x, self.train_y = train_data\n self.n = len(self.train_y)\n print('Training examples = %d' % self.n)\n self.test_x, self.test_y = test_data\n self.tn = len(self.test_y)\n print('Test examples = %d' % self.tn)\n self.i = 0\n self.batch_size = batch_size\n print('Batch size = %d' % self.batch_size)\n self.randomize = randomize\n if randomize:\n idx = np.random.permutation(self.n)\n self.train_x = self.train_x[idx]\n self.train_y = self.train_y[idx]\n print('Shuffled training data')\n self.orig_data = (np.copy(self.train_x), np.copy(self.train_y),\n np.copy(self.test_x), np.copy(self.test_y))\n \n self.embedding_save_file = embedding_save_file\n self.embedding_model_file = embedding_model_file\n self.embedding_model_layer = embedding_model_layer\n self.reshape_dims = (64*8*8,) # (64,)\n self.convert_to_embeddings()\n\n self.n_tasks = len(task_labels)\n self.task_labels = task_labels\n self.n_labels_per_task = len(task_labels[0])\n for t in self.task_labels: assert(len(t) == self.n_labels_per_task)\n self.get_taskwise_data()\n self.switch_task(0)\n\n def img_fn_cifar100(img):\n image = np.zeros((32,32,3), dtype=np.uint8)\n image[...,0] = 
np.reshape(img[:1024], (32,32)) # Red channel\n image[...,1] = np.reshape(img[1024:2048], (32,32)) # Green channel\n image[...,2] = np.reshape(img[2048:], (32,32)) # Blue channel\n return image\n \n self.img_fn = img_fn_cifar100\n \n def iterative_fn(self, fn, dataset, batches = 100):\n ret = []\n n = dataset.shape[0]\n per_batch_size = n // batches\n for i in tqdm(range(batches)):\n if i+1 != batches:\n ret += [fn(dataset[i*per_batch_size:(i+1)*per_batch_size])]\n else:\n ret += [fn(dataset[i*per_batch_size:])]\n ret = np.vstack(ret)\n return ret\n\n def convert_to_embeddings(self):\n if os.path.isfile(self.embedding_save_file):\n print('Embedding file %s exists, skipping embedding generation.'\n % self.embedding_save_file)\n self.etrain_x, self.etest_x = load_pickle(self.embedding_save_file)\n else:\n assert(os.path.isfile(self.embedding_model_file))\n model = load_model(self.embedding_model_file)\n print(\"Loaded model: %s\" % self.embedding_model_file)\n train_x = self.train_x.astype('float32') / 255\n train_x_mean = np.mean(train_x, axis = 0)\n train_x -= train_x_mean\n test_x = self.test_x.astype('float32') / 255\n test_x -= train_x_mean\n results = model.evaluate(test_x, to_categorical(self.test_y))\n print(\"Test acc: %s\" % results)\n intermediate_layer = model.\\\n get_layer(self.embedding_model_layer).output\n embedding_model = keras.Model(\n inputs = model.input, outputs = intermediate_layer)\n assert(len(self.reshape_dims) == 1)\n dim = self.reshape_dims[0]\n fn = lambda x: np.reshape(embedding_model.predict(x), [-1, dim])\n self.etrain_x = self.iterative_fn(fn, train_x)\n self.etest_x = self.iterative_fn(fn, test_x)\n save_pickle([self.etrain_x, self.etest_x],\n savefile = self.embedding_save_file)\n clear_session()\n print('Loaded embeddings.')\n \n # Remap class labels eg. 
33,2,4 => 0, 1, 2\n def remap(self, x, classnums):\n # print(x)\n x = np.squeeze(x)\n # curr_labels = np.unique(x)\n # new_labels = {label: i for i, label in enumerate(curr_labels)}\n new_labels = {label: i for i, label in enumerate(classnums)}\n x_remapped = np.copy(x)\n for i in range(x.shape[0]):\n x_remapped[i] = new_labels[x[i]]\n # print(np.unique(x), np.unique(x_remapped))\n return x_remapped, new_labels\n\n def get_taskwise_data(self):\n self.tasks = {}\n for i in range(self.n_tasks):\n self.tasks[i] = {}\n class_nums = self.task_labels[i]\n tr_indices = np.array([np.where(self.train_y == class_num)[0] for \\\n class_num in class_nums]).flatten()\n test_indices = np.array([np.where(self.test_y == class_num)[0] for \\\n class_num in class_nums]).flatten()\n self.tasks[i]['train_x'] = self.etrain_x[tr_indices]\n self.tasks[i]['img_train_x'] = self.train_x[tr_indices]\n self.tasks[i]['train_y'], tr_labels = self.remap(self.train_y[tr_indices], class_nums)\n self.tasks[i]['n'] = len(tr_indices)\n if self.randomize:\n idx = np.random.permutation(self.tasks[i]['n'])\n self.tasks[i]['train_x'] = self.tasks[i]['train_x'][idx]\n self.tasks[i]['img_train_x'] = self.tasks[i]['img_train_x'][idx]\n self.tasks[i]['train_y'] = self.tasks[i]['train_y'][idx]\n self.tasks[i]['test_x'] = self.etest_x[test_indices]\n self.tasks[i]['img_test_x'] = self.test_x[test_indices]\n self.tasks[i]['test_y'], test_labels = self.remap(self.test_y[test_indices], class_nums)\n self.tasks[i]['tn'] = len(test_indices)\n if self.randomize:\n idx = np.random.permutation(self.tasks[i]['tn'])\n self.tasks[i]['test_x'] = self.tasks[i]['test_x'][idx]\n self.tasks[i]['img_test_x'] = self.tasks[i]['img_test_x'][idx]\n self.tasks[i]['test_y'] = self.tasks[i]['test_y'][idx]\n assert(tr_labels == test_labels)\n\n def switch_task(self, new_task_idx):\n assert(0 <= new_task_idx < self.n_tasks)\n self.curr_idx = new_task_idx\n self.n = self.tasks[self.curr_idx]['n']\n self.tn = 
self.tasks[self.curr_idx]['tn']\n self.train_x = self.tasks[self.curr_idx]['train_x']\n self.img_train_x = self.tasks[self.curr_idx]['img_train_x']\n self.train_y = np.squeeze(self.tasks[self.curr_idx]['train_y'])\n self.test_x = self.tasks[self.curr_idx]['test_x']\n self.img_test_x = self.tasks[self.curr_idx]['img_test_x']\n self.test_y = np.squeeze(self.tasks[self.curr_idx]['test_y'])\n # print('switch to %d: %s' % (new_task_idx, np.unique(self.test_y)))\n \n def inspect(self):\n\n print('inspect')\n\n r, c = self.n_tasks, self.n_labels_per_task\n xw = min(15, c)\n yw = max(1.5*r, 10)\n fig = plt.figure(figsize = (xw, yw))\n subplot_i = 0\n \n for task in range(self.n_tasks):\n self.switch_task(task)\n classes_to_show = np.unique(self.test_y)\n all_indices = [np.where(self.test_y == class_num)[0] for class_num in classes_to_show]\n n_ex = [len(item) for item in all_indices]\n example_indices = [np.random.choice(item) for item in all_indices]\n examples = self.img_test_x[example_indices]\n\n for i, img_idx in enumerate(classes_to_show):\n ax = fig.add_subplot(r, c, subplot_i+1)\n ax.set_xticks(())\n ax.set_yticks(())\n label_human_readable = str(img_idx) # TODO\n img = examples[img_idx]\n ax.set_xlabel(label_human_readable)\n plt.imshow(img, cmap='gray', interpolation='none')\n subplot_i += 1\n\n # plt.tight_layout(True)\n plt.savefig(\"inspect.png\")\n plt.show()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.i+self.batch_size > self.n:\n self.i = 0\n ret_data = self.train_x[self.i:self.i+self.batch_size]\n ret_labels = self.train_y[self.i:self.i+self.batch_size]\n self.i += self.batch_size\n return ret_data, ret_labels\n \n def test(self, samples = 32):\n idx = np.random.choice(self.tn, size = samples, replace = False)\n return self.test_x[idx], self.test_y[idx]" ]
[ [ "numpy.vstack", "numpy.squeeze", "numpy.zeros", "numpy.random.permutation", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.reshape", "tensorflow.keras.models.load_model", "numpy.random.choice", "numpy.copy", "tensorflow.keras.Model", "tensorflow.keras.backend.clear_session", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.where", "numpy.unique", "numpy.mean" ] ]
pranavgo/gennav
[ "fc57707912c6f1c6af208a30b2ab0ad78c2cc798" ]
[ "gennav/envs/binaryOccupancyGrid2D_env.py" ]
[ "import numpy as np\nfrom gennav.envs.base import Environment\nfrom gennav.utils.common import RobotState\nfrom gennav.utils.geometry import Point\nfrom matplotlib import pyplot as plt\n\n\nclass BinaryOccupancyGrid2DEnv(Environment):\n \"\"\"Base class for a Binary Occupancy Grid 2D envrionment.\n\n Arguments:\n X (unsigned int) : The number of grid cells in the x-direction\n Y (unsigned int) : the number of grid cells in the y-direction\n \"\"\"\n\n def __init__(self, X=10, Y=10):\n super(BinaryOccupancyGrid2DEnv, self).__init__()\n self.X = X\n self.Y = Y\n self.scan = None\n self.robotPose = None\n self.scale = 5\n self.grid = np.zeros((self.X * self.scale, self.Y * self.scale))\n\n # Storing transforms\n self.transforms = {}\n self.mapTbot = {\n \"from\": \"map\",\n \"to\": \"bot\",\n \"transform\": self.scale\n * np.array(\n [[1, 0, int(self.X / 2)], [0, 1, int(self.Y / 2)], [0, 0, 1]]\n ).reshape(3, 3),\n }\n self.botTworld = {\"from\": \"bot\", \"to\": \"world\", \"transform\": np.empty((3, 3))}\n self.mapTworld = {\n \"from\": \"map\",\n \"to\": \"world\",\n \"transform\": np.dot(self.mapTbot[\"transform\"], self.botTworld[\"transform\"]),\n }\n self.transforms[\"mapTbot\"] = self.mapTbot\n self.transforms[\"botTworld\"] = self.botTworld\n self.transforms[\"mapTworld\"] = self.mapTworld\n\n def update(self, scan, robotPose):\n \"\"\"Function to update the environment\n Args:\n scan (list) : List of ang_min, ang_max, ranges\n robotPose (gennav.utils.RobotPose) : Current RobotPose\n \"\"\"\n self.scan = scan\n self.robotPose = robotPose\n self.compute_transforms()\n self.fillOccupancy()\n\n def fillOccupancy(self):\n \"\"\"Function that fill the occupnacy grid on every update\n Assumptions:\n 1. RobotPose is considered (0, 0, 0) to accomodate the laser scan, which produces ranges wrt to the bot\n 2. The RobotPose in the occupancy grid is (X * scale_factor/2, Y * scale_factor /2, 0)\n 3. 
The attribute robotPose is the real pose of the robot wrt to the world Frame,\n thus it helps us to calculate the transform for trajectory and pose validity queries\n \"\"\"\n self.grid[:] = 0\n ang_min, ang_max, ranges = self.scan\n angle_step = (ang_max - ang_min) / len(ranges)\n for i, rng in enumerate(ranges):\n\n # Check for obstacles\n if np.abs(rng) is not np.inf:\n x, y = (\n rng * np.cos(ang_min + i * angle_step),\n rng * np.sin(ang_max + i * angle_step),\n )\n newState = self.transform(\"bot\", \"map\", RobotState(Point(x, y, 0)))\n x_, y_ = newState.position.x, newState.position.y\n\n # Checking if the range is within the grid, to mark them as occupied\n if 0 <= x_ < self.grid.shape[0] and 0 <= y_ < self.grid.shape[1]:\n if self.grid[int(x_)][int(-y_ - 1)] != 1:\n self.grid[int(x_)][int(-y_ - 1)] = 1\n\n def get_status(self, state):\n \"\"\"Get whether a given state is valid within the environment.\n\n Method for checking the validity of a given RobotPose in the environment.\n\n Args:\n state (gennav.utils.RobotState): State to be checked\n\n Returns:\n bool: True if state is valid otherwise False\n \"\"\"\n state = self.transform(\"world\", \"map\", state)\n x, y = state.position.x, state.position.y\n if self.grid[x][-y - 1] == 1:\n return False\n else:\n return True\n\n def get_traj_status(self, traj):\n \"\"\"Get whether a given trajectory is valid within the environment.\n\n Method for checking the validity of a trajectory in the given environment.\n\n Args:\n state (gennav.utils.Trajectory): Trajectory to be checked\n\n Returns:\n bool: True if state is valid otherwise False\n \"\"\"\n collision = False\n for i in range(len(traj.path) - 1):\n collision = self.check_line_segment(\n self.transform(\"world\", \"map\", traj.path[i]),\n self.transform(\"world\", \"map\", traj.path[i + 1]),\n )\n if collision:\n break\n return not collision\n\n def transform(self, frame1, frame2, rsf1):\n \"\"\"Transform robotPose from one pose to the other\n\n Args:\n 
frame1 (string) : from the frame (world, bot, map)\n frame2 (string) : to the frame (world, bot, map)\n rsf1 (gennav.utils.common.RobotState) : RobotState in frame1\n Returns:\n rsf2 (gennav.utils.common.RobotState) : RobotState in frame2\n \"\"\"\n # TODO: Make it more robust in terms of checking frames\n\n # Check if the required trnasform or the inverse of the transform exists\n frame = frame2 + \"T\" + frame1\n frame_inv = frame1 + \"T\" + frame2\n if frame in self.transforms.keys():\n t_matrix = self.transforms[frame][\"transform\"]\n elif frame_inv in self.transforms.keys():\n t_matrix = np.linalg.inv(self.transforms[frame_inv][\"transform\"])\n else:\n raise Exception(\"Transform for the frames not found\")\n\n # Transform using matrix multiplication\n pf2 = np.dot(\n t_matrix, np.array([rsf1.position.x, rsf1.position.y, 1]).reshape(3, 1)\n )\n rsf2 = RobotState(position=Point(pf2[0].item(), pf2[1].item()))\n\n # Return RobotState\n return rsf2\n\n def compute_transforms(self):\n \"\"\"Computes transforms between frames\n\n Uses robot pose to compute transform between the world frame and the bot frame\n \"\"\"\n x, y, yaw = (\n self.robotPose.position.x,\n self.robotPose.position.y,\n self.robotPose.orientation.yaw,\n )\n worldTbot = np.array(\n [[np.cos(yaw), -np.sin(yaw), x], [np.sin(yaw), np.cos(yaw), y], [0, 0, 1]]\n ).reshape(3, 3)\n self.botTworld[\"transform\"] = np.linalg.inv(worldTbot)\n self.mapTworld[\"transform\"] = np.dot(\n self.mapTbot[\"transform\"], self.botTworld[\"transform\"]\n )\n\n def visualise_grid(self):\n \"\"\"\n Helper function to visualise grid\n \"\"\"\n plt.imshow(self.grid, origin=\"bottom\", cmap=\"binary\")\n plt.show()\n\n def check_line_segment(self, state1, state2):\n \"\"\"Checks whether a line segment is collision free in the environent\n\n Computes a line segment from the start point to the end point and\n parametrically checks if the grid cells they occupy are occupied.\n\n Args:\n state1 
(gennav.utils.common.RobotState) : One end point\n state2 (gennav.utils.common.RobotState) : The other end point\n \"\"\"\n point1 = state1.position\n point2 = state2.position\n x1, y1 = point1.x, point1.y\n x2, y2 = point2.x, point2.y\n m = (y2 - y1) / (x2 - x1)\n collision = False\n for x in np.arange(x1, x2, 0.5):\n y = m * x - m * x1 + y1\n if self.grid[int(x)][int(-y - 1)] == 1:\n collision = True\n break\n return collision\n" ]
[ [ "numpy.empty", "numpy.zeros", "numpy.linalg.inv", "numpy.abs", "numpy.cos", "numpy.arange", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.array", "numpy.sin", "numpy.dot" ] ]
elena-kolomeets/lowfat
[ "f7647f5cd12519f722e41808157a96cc3e37b6ce" ]
[ "lowfat/management/commands/load2019applications.py" ]
[ "import pandas as pd\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import BaseUserManager\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError\n\nfrom lowfat.models import Claimant\n\nclass Command(BaseCommand):\n help = \"Import CSV with 2019 applications.\"\n\n def add_arguments(self, parser):\n parser.add_argument('csv', nargs='?', default='2019.csv')\n\n # pylint: disable=too-many-branches,too-many-locals\n def handle(self, *args, **options):\n fail_list = []\n success_list = []\n user_manager = BaseUserManager()\n\n data = pd.read_csv(options['csv'])\n for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable\n\n received_offer = line['Invited'] == 'YES'\n if line[\"Research Classification\"] == \"N/A - I do not do research\":\n jacs = \"Y0\"\n else:\n jacs = line[\"Research Classification\"][1:3]\n\n applicants_dict = {\n \"application_year\": 2018,\n \"fellow\": False,\n \"received_offer\": received_offer,\n \"forenames\": line[\"First name\"],\n \"surname\": line[\"Surname\"],\n \"affiliation\": line[\"Home Institution\"],\n \"department\": line[\"Department\"] if pd.notnull(line[\"Department\"]) else \"\",\n \"group\": line[\"Group within Department\"] if pd.notnull(line[\"Group within Department\"]) else \"\",\n \"career_stage_when_apply\": line[\"Career stage\"][6],\n \"job_title_when_apply\": line[\"Job Title\"],\n \"research_area\": line[\"Area of work\"],\n \"research_area_code\": jacs,\n \"email\": line[\"Email Address\"],\n \"phone\": line[\"Telephone number\"],\n \"gender\": line[\"Gender\"][0] if pd.notnull(line[\"Gender\"]) else 'R',\n \"home_country\": \"GB\",\n \"home_city\": \"Unknow\",\n \"funding\": line[\"Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work\"],\n \"funding_notes\": line[\"Which additional funding body/charity/organisation would you probably turn to if 
seeking financial support for your research/work\"] if pd.notnull(line[\"Which additional funding body/charity/organisation would you probably turn to if seeking financial support for your research/work\"]) else \"\",\n \"claimantship_grant\": 3000 if received_offer else 0,\n \"institutional_website\": line[\"Please specify your Institutional webpage\"] if pd.notnull(line[\"Please specify your Institutional webpage\"]) else \"\",\n \"website\": line[\"Please specify your blog\"] if pd.notnull(line[\"Please specify your blog\"]) else \"\",\n \"orcid\": line[\"Please specify your ORCID\"] if pd.notnull(line[\"Please specify your ORCID\"]) else \"\",\n \"google_scholar\": line[\"Please specify your Google Scholar\"] if pd.notnull(line[\"Please specify your Google Scholar\"]) else \"\",\n \"twitter\": line[\"Please specify your Twitter handle\"] if pd.notnull(line[\"Please specify your Twitter handle\"]) else \"\",\n \"screencast_url\": line[\"Application Screencast URL\"] if pd.notnull(line[\"Application Screencast URL\"]) else \"\",\n \"example_of_writing_url\": line[\"Example of writing\"] if pd.notnull(line[\"Example of writing\"]) else \"\",\n }\n\n try:\n applicant = Claimant(**applicants_dict)\n applicant.save()\n success_list.append(index)\n\n if received_offer:\n new_user = get_user_model().objects.create_user(\n username=applicant.slug,\n email=applicant.email,\n password=user_manager.make_random_password(),\n first_name=line[\"First name\"],\n last_name=line[\"Surname\"]\n )\n applicant.user = new_user\n applicant.save()\n\n except IntegrityError as exception:\n try:\n applicant = Claimant.objects.get(\n email=applicants_dict[\"email\"]\n )\n for key, value in applicants_dict.items():\n applicant[key] = value\n\n applicant.save()\n success_list.append(index)\n\n if received_offer:\n new_user = get_user_model().objects.create_user(\n username=applicant.slug,\n email=applicant.email,\n password=user_manager.make_random_password(),\n first_name=line[\"First 
name\"],\n last_name=line[\"Surname\"]\n )\n applicant.user = new_user\n applicant.save()\n\n except BaseException as exception:\n print(\"Error: {}\\n{}\\n{}\".format(exception, line, 80 * \"-\"))\n fail_list.append(index)\n\n except BaseException as exception:\n print(\"Error: {}\\n{}\\n{}\".format(exception, line, 80 * \"-\"))\n fail_list.append(index)\n\n print(80 * \"-\")\n print(\"Success: {}\".format(success_list))\n print(\"Fail: {}\".format(fail_list))\n" ]
[ [ "pandas.read_csv", "pandas.notnull" ] ]
meracan/s3-netcdf-api
[ "920d09ef7b1a205230ea2c76eabcb4853616992c" ]
[ "test/other/dataTest.old.py" ]
[ "import numpy as np\nfrom datetime import datetime\n\nnpe=3\nnelem=20\nnnode=10\nnstation=27\nnsnode=3\nntime=8760\nnfreq=3\nndir=5\n\nelem=np.arange(nelem*npe,dtype=\"i4\").reshape((nelem,npe))\ntime=np.datetime64(datetime(2000,1,1))+np.arange((ntime))*np.timedelta64(1, 'h')\nlat=np.arange((nnode),dtype=\"f8\")\nlon=np.arange((nnode),dtype=\"f8\")\nnodes=np.column_stack((lon,lat))\nbed=np.arange((nnode),dtype=\"f4\")\nslat=np.arange((nstation),dtype=\"f8\")\nslon=np.arange((nstation),dtype=\"f8\")\nfreq=np.arange((nfreq),dtype=\"f8\")\ndir=np.arange((ndir),dtype=\"f8\")\n\nnshape=ntime*nnode\nshape=(ntime,nnode)\nvariables={\n \"WIND\":{\n \"Windv_x\":np.arange(nshape,dtype=\"f4\").reshape(shape),\n \"Windv_y\":np.arange(nshape,dtype=\"f4\").reshape(shape),\n },\n \"HS\":{\"Hsig\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"DIR\":{ \"Dir\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \n \"TPS\":{\"TPsmoo\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TMM10\":{\"Tm_10\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TM01\":{\"Tm01\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TM02\":{\"Tm02\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \n \"PDIR\":{\"PkDir\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"DSPR\":{\"Dspr\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"QP\":{\"Qp\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TRANSP\":{\"Transp_x\":np.arange(nshape,dtype=\"f4\").reshape(shape),\"Transp_y\":np.arange(nshape,dtype=\"f4\").reshape(shape),}\n \n }\n\n\nnshape=nstation*nsnode*ntime*nfreq*ndir\nshape=(nstation,nsnode,ntime,nfreq,ndir)\n\nspcgroup={\n \"spectra\":(np.arange(nshape,dtype=\"f8\")).reshape(shape)\n}\n\nstations={\n \"beverly\": 1,\n \"brooks\": 1,\n \"c_dixon\": 1,\n \"c_eliz\": 1,\n \"campbell\": 1,\n \"e_dell\": 1,\n \"hotspots\": 2,\n \"line_n\": 2,\n \"line_w\": 3,\n \"line_s\": 2,\n \"m_nomad\": 1,\n \"n_hecat\": 1,\n \"ne_isle\": 1,\n \"neah\": 2,\n \"p_renf\": 1,\n 
\"perouse\": 1,\n \"s_hecat\": 1,\n \"s_morsb\": 1,\n \"s_nomad\": 1,\n \"sombrio\": 1,\n \"sooke\": 1,\n \"tarbotn\": 1,\n \"tillamk\": 1,\n \"tofino\": 1,\n \"w_dixon\": 1,\n \"w_morsb\": 1,\n \"w_otter\": 1,\n \"w_washn\": 1\n}\n# Create lat lng for each station\nfor i,id in enumerate(stations):\n c=np.array([[1.0,1.0]])\n stations[id]={\"id\":i,\"nsnodes\":stations[id],\"latlng\":((np.arange(stations[id])+1)*i)[:,np.newaxis]*c}" ]
[ [ "numpy.arange", "numpy.array", "numpy.column_stack", "numpy.timedelta64" ] ]
cafe-com-analytics/stock_market_index_daily_direction
[ "e05eced04d3f0ae3134315de0163bfdf140c1e4a" ]
[ "src/features/build_features.py" ]
[ "import numpy as np\nimport pandas as pd\nimport yfinance as yf\n\n\ndef downloading_stocks_data(dct, start_date: str = \"2021-01-01\", end_date: str = \"2021-07-01\") -> pd.DataFrame:\n \"\"\"\n Download the stocks daily information from tickers listed as keys of a dictionary, gets only \"Close\" price from\n each day within start_date and end_date.\n\n Args:\n dct (dict): format {'ticker': {'name': name, etc}}\n start_date (str, optional): [description]. Defaults to \"2011-01-01\".\n end_date (str, optional): [description]. Defaults to \"2022-01-01\".\n\n Returns:\n pd.DataFrame: dataframe of close prices of each ticker.\n \"\"\"\n df = yf.download(list(dct.keys())[0], start=start_date, end=end_date, show_errors=False)[[\"Close\"]]\n df.columns = [dct[list(dct.keys())[0]][\"name\"]]\n\n for market_index in list(dct.keys())[1:]:\n df_temp = yf.download(market_index, start=start_date, end=end_date)[[\"Close\"]]\n df_temp.columns = [dct[market_index][\"name\"]]\n df = df.merge(df_temp, how='left', left_index=True, right_index=True)\n\n df.dropna(how='all', axis=0, inplace=True)\n df.fillna(method='ffill', inplace=True)\n df.fillna(method='bfill', inplace=True)\n\n return df\n\n\ndef daily_return(df, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"\n Return the daily return of the lst_columns.\n \"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n df[column] = (np.log(df[column]) - np.log(df[column].shift(periods=1)))*100\n\n df.dropna(axis=0, how='all', inplace=True)\n\n return df\n\n\ndef return_in_period(df, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"\n Return the return of the lst_columns.\n \"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n df[column] = 
df[column]/df[column][0]\n\n return df\n\n\ndef create_shifted_rt(df, rts: list, column_name: str = 'Close') -> pd.DataFrame:\n \"\"\"\n Return a dataframe with new lagged columns according to a rts' list.\n\n Args:\n df (pd.DataFrame): [description]\n rts (list): list with int values. Each value represents a lag in period.\n column_name (str, optional): [description]. Defaults to 'Close'.\n\n Returns:\n pd.DataFrame: [description]\n \"\"\"\n for t in rts:\n df[f\"rt-{t}\"] = df[column_name].shift(periods=t)\n return df\n\n\ndef uniform_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"This function creates the target \"Cluster\" according to the limits described in (2011, Zuo and Kita).\"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n conditions = [\n df[column] < -1.12,\n (df[column] >= -1.12) & (df[column] < -0.42),\n (df[column] >= -0.42) & (df[column] < 0),\n (df[column] >= 0) & (df[column] < 0.44),\n (df[column] >= 0.44) & (df[column] < 1.07),\n df[column] >= 1.07]\n\n choices = [1, 2, 3, 4, 5, 6]\n df[\"cluster_\"+column] = np.select(conditions, choices, default=np.nan)\n\n return df\n\n\ndef binary_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"\n This function creates the target \"Cluster\" according to the limits described in article.\n\n Args:\n df (pd.DataFrame): [description]\n lst_columns (list): [description]\n\n Returns:\n pd.DataFrame: return 'cluster_'+column with values 1 for positive return and 0 for equal or below zero.\n \"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n df[\"cluster_\"+column] = np.where(df[column] > 0, 1, 0)\n\n return df\n\n\ndef boxplot_clustering(df: pd.DataFrame, lst_columns: list = 
'all') -> pd.DataFrame:\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n df_boxplot = df.describe().T\n quartile_1 = df_boxplot[\"25%\"][0]\n quartile_2 = df_boxplot[\"50%\"][0]\n quartile_3 = df_boxplot[\"75%\"][0]\n\n for column in lst_columns:\n conditions = [\n (df[column] < quartile_1),\n (df[column] >= quartile_1) & (df[column] < quartile_2),\n (df[column] >= quartile_2) & (df[column] < quartile_3),\n (df[column] >= quartile_3)]\n\n choices = [int(1), int(2), int(3), int(4)]\n df[\"cluster_\"+column] = np.select(conditions, choices, default=np.nan)\n\n return df\n" ]
[ [ "numpy.where", "numpy.log", "numpy.select" ] ]
clear-nus/BOIRL
[ "cc872111fda3c7b8118e1a864831013c30f63948" ]
[ "bayesian_irl/src/utils/prob_dists.py" ]
[ "import scipy.stats\nimport numpy as np\nfrom scipy.stats import multivariate_normal as MVG\n\nclass UniformDist:\n def __init__(self, xmax=1., xmin=None):\n self.xmax = xmax\n self.xmin = - xmax if xmin is None else xmin\n self.prob = 1 / (self.xmax - self.xmin)\n\n def __call__(self, *args, **kwargs):\n return self.prob\n\n def __str__(self):\n return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)\n\nclass MultiuniformDist:\n def __init__(self, xmax=[2.,10.], xmin=[-2.,-10.]):\n self.xmax = xmax\n self.xmin = - xmax if xmin is None else xmin\n self.prob = (1 / (self.xmax[0] - self.xmin[0]))*(1 / (self.xmax[1] - self.xmin[1]))\n\n def __call__(self, *args, **kwargs):\n return self.prob\n\n def __str__(self):\n return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)\n\nclass MultiuniformborlangeDist:\n def __init__(self, xmax=[0., 0.], xmin=[-2.5,-2.5]):\n self.xmax = xmax\n self.xmin = - xmax if xmin is None else xmin\n self.prob = (1 / (self.xmax[0] - self.xmin[0]))*(1 / (self.xmax[1] - self.xmin[1]))\n\n def __call__(self, *args, **kwargs):\n return self.prob\n\n def __str__(self):\n return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)\n\n\nclass DistBase:\n def __init__(self, dist, params):\n self.dist = dist\n self.params = params\n\n def __call__(self, x):\n \"\"\"\n :x: input\n :return: P(x)\n \"\"\"\n return np.exp(np.sum(self.dist.logpdf(x, **self.params)))\n\n def sample(self, size=10):\n return self.dist.rvs(size=size, **self.params)\n\n def __str__(self):\n return self.__class__.__name__ + '(' + ', '.join(['{}={}'.format(key, value)\n for key, value in self.params.items()]) + ')'\n\n\nclass GaussianDist(DistBase):\n def __init__(self, loc=0, scale=0.1):\n \"\"\"\n :param loc: location of gaussian distribution\n :param scale: var == scale ** 2\n \"\"\"\n params = dict(loc=loc, scale=scale)\n dist = scipy.stats.norm\n super().__init__(dist=dist, params=params)\n\n\nclass MultigaussDist(DistBase):\n def __init__(self, 
mean=np.array([1.25, 5.0]), cov=np.array([[1, 0], [0, 1]])):\n \"\"\"\n :param loc: location of gaussian distribution\n :param scale: var == scale ** 2\n \"\"\"\n #params = dict(mean=mean, cov=cov)\n self.rvs = MVG(mean=mean,cov=cov)\n #super().__init__(dist=dist, params=params)\n\n def __call__(self, x):\n return np.exp(np.sum(self.rvs.logpdf(x)))\n\nclass MultigaussBorlangeDist(DistBase):\n def __init__(self, dist, mean=np.array([-2, -1.0, -1]), cov=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])):\n \"\"\"\n :param loc: location of gaussian distribution\n :param scale: var == scale ** 2\n \"\"\"\n #params = dict(mean=mean, cov=cov)\n self.rvs = MVG(mean=mean,cov=cov)\n #super().__init__(dist=dist, params=params)\n\n def __call__(self, x):\n return np.exp(np.sum(self.rvs.logpdf(x)))\n\n\n\n\nclass BetaDist(DistBase):\n def __init__(self, a=0.5, b=0.5, loc=0, scale=1):\n params = dict(a=a, b=b, loc=loc, scale=scale)\n dist = scipy.stats.beta\n super().__init__(dist=dist, params=params)\n\n\nclass GammaDist(DistBase):\n def __init__(self, a=2, loc=0, scale=1):\n params = dict(a=a, loc=loc, scale=scale)\n dist = scipy.stats.gamma\n super().__init__(dist=dist, params=params)\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import os\n\n dists = (GaussianDist, BetaDist, GammaDist)\n for dist in dists:\n distribution = dist()\n samples = distribution.sample(size=100)\n plt.hist(samples)\n plt.title(distribution)\n path = '/' + os.path.join(*os.path.abspath(__file__).split('/')[:-3], 'results',\n '{}.png'.format(dist.__name__))\n plt.savefig(path)\n plt.cla()\n" ]
[ [ "matplotlib.pyplot.cla", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.hist", "numpy.array", "scipy.stats.multivariate_normal" ] ]
edesz/electricity-consumption-forecast
[ "9bc49523d9c2ed6d827ce690916980cf7e818fed" ]
[ "src/data_prep_helpers.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport pandas as pd\n\n\ndef add_corona_dates(df, index_name, strategy=[\"during_corona\", \"no_corona\"]):\n \"\"\"\n Inputs\n ------\n strategy : List\n division of datetimes based on stages of corona; acceptable strategies\n are one of the following (order in list does not matter)\n - ['during_corona', 'no_corona']\n - ['pre_corona', 'during_corona', 'post_corona']\n\n SOURCE\n ------\n https://github.com/facebook/prophet/issues/1416#issuecomment-618553502\n \"\"\"\n d_corona = {\n \"BE\": [\n pd.to_datetime(\"2020-03-07 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"CH\": [\n pd.to_datetime(\"2020-03-07 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"CZ\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"DE\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"ES\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"FR\": [\n pd.to_datetime(\"2020-03-07 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"HR\": [\n pd.to_datetime(\"2020-03-21 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"IT\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"NL\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"PL\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n }\n df_corona = (\n pd.DataFrame.from_dict(d_corona, orient=\"index\")\n .reset_index()\n .rename(\n columns={0: \"corona_start\", 1: \"corona_end\", \"index\": \"country\"}\n )\n )\n df = df.merge(df_corona, on=\"country\", how=\"left\")\n\n # Add corona periods based on specified strategy\n strategies_dict = {\n \"dn\": [\"during_corona\", \"no_corona\"],\n \"pdp\": [\"pre_corona\", \"during_corona\", 
\"post_corona\"],\n }\n if set(strategy) == set(strategies_dict[\"dn\"]):\n df[\"no_corona\"] = (df[index_name] < df[\"corona_start\"]) | (\n df[index_name] > df[\"corona_end\"]\n )\n elif set(strategy) == set(strategies_dict[\"pdp\"]):\n df[\"pre_corona\"] = df[index_name] < df[\"corona_start\"]\n df[\"post_corona\"] = df[index_name] > df[\"corona_end\"]\n else:\n strategies = \"\"\n for _, v in strategies_dict.items():\n strategies += \"['\" + \"', '\".join(map(str, v)) + \"'], \"\n strategies = strategies.rstrip(\", \")\n raise Exception(\n f\"Unsupported corona strategy. Expected one of: {strategies}\"\n )\n df[\"during_corona\"] = (df[index_name] >= df[\"corona_start\"]) & (\n df[index_name] <= df[\"corona_end\"]\n )\n return df\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame.from_dict" ] ]
barcawy/OpenNE
[ "88018ed9bf34d09020464a430e09afb704b1f322" ]
[ "src/openne/Z_0709.py" ]
[ "from __future__ import print_function\nimport time\nimport math\nimport random\nimport numpy as np\nimport pickle as pkl\nimport networkx as nx\nfrom gensim.models import Word2Vec\nfrom fastdtw import fastdtw\nfrom collections import Counter\nfrom collections import defaultdict\nimport os\n\nclass Z(object):\n\n def __init__(self, graph, path_length, num_paths, dim, prefix, hop, **kwargs):\n\n kwargs[\"workers\"] = kwargs.get(\"workers\", 4)\n\n #kwargs[\"hs\"] = 1 # 1 分层softmax 0 负采样\n\n self.graph = graph\n preprocess = False\n if preprocess:\n self.ppr_matrix = self.constructSubGraph(hop)\n self.degrees, self.degree_permuted = self.create_degree()\n self.degree_neighbors, self.norm_weight = self.create_ppr_sample_table()\n self.dump_to_disk(self.degree_neighbors,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')\n self.dump_to_disk(self.norm_weight,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')\n else:\n self.degree_neighbors = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')\n self.norm_weight = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')\n sentences = self.simulate_walks(\n num_walks=num_paths, walk_length=path_length)\n kwargs[\"sentences\"] = sentences\n kwargs[\"min_count\"] = kwargs.get(\"min_count\", 0)\n kwargs[\"size\"] = kwargs.get(\"size\", dim)\n kwargs[\"sg\"] = 1 # 1 skipgram; 0 CBOW\n\n self.size = kwargs[\"size\"]\n print(\"Learning representation...\")\n word2vec = Word2Vec(**kwargs)\n self.vectors = {}\n for word in graph.G.nodes():\n self.vectors[word] = word2vec.wv[word]\n del word2vec\n\n def dump_to_disk(self, f, file_name):\n with open(file_name + '.pkl', 'wb') as handle:\n pkl.dump(f, handle, protocol=pkl.HIGHEST_PROTOCOL)\n\n def load_pkl(self, file_name):\n with open(file_name + '.pkl', 'rb') as handle:\n val = pkl.load(handle)\n return val\n\n def neighbors(self, fringe):\n # find all 1-hop neighbors of nodes in 
fringe from A\n graph = self.graph.G\n res = set()\n for node in fringe:\n nei = graph.neighbors(node)\n nei = set(nei)\n res = res.union(nei)\n return res\n\n def constructSubGraph(self, hop):\n graph = self.graph.G\n edge_set = set(graph.edges())\n nodes = list(graph.nodes())\n #subgraph_map = defaultdict(nx.Graph)\n ppr_matrix = {}\n for node in nodes:\n subgraph_map = nx.Graph()\n subgraph_map.add_node(node)\n fringe = set(node)\n visited = set(node)\n for dist in range(0, hop):\n fringe = self.neighbors(fringe)\n fringe = fringe - visited\n visited = visited.union(fringe)\n visited = list(visited)\n for pos_u, u in enumerate(visited):\n for v in visited[pos_u+1:]:\n if (u, v) in edge_set or (v, u) in edge_set:\n subgraph_map.add_edge(u, v)\n\n ppr_matrix[node] = Counter()\n walk = self.subgraph_walk(subgraph_map, walk_length=500, start_node=node)\n ppr_matrix[node].update(walk)\n return ppr_matrix\n\n def subgraph_walk(self, subGraph, walk_length, start_node):\n '''\n Simulate a random walk starting from start node.\n '''\n G = subGraph\n walk = [start_node]\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = list(G.neighbors(cur))\n if len(cur_nbrs) > 0:\n walk.append(random.choice(cur_nbrs))\n else:\n # 独立的点\n break\n return walk\n\n def deepwalk_walk(self, walk_length, start_node, alpha = 0.5):\n '''\n Simulate a random walk starting from start node.\n '''\n G = self.graph.G\n walk = [start_node]\n while len(walk) < walk_length:\n cur = walk[-1]\n alpha = 1#alpha/G.degree(cur)\n if np.random.rand() < alpha:\n walk.append(np.random.choice(self.degree_neighbors[cur], p=self.norm_weight[cur]))\n else:\n cur_nbrs = list(G.neighbors(cur))\n if len(cur_nbrs) > 0:\n # node2vec\n n2v = 0\n if n2v:\n nbr = random.choice(cur_nbrs)\n if set(cur_nbrs) & set(G.neighbors(nbr)):\n walk.append(random.choice(cur_nbrs))\n else:\n walk.append(nbr)\n else:\n # deepwalk\n walk.append(random.choice(cur_nbrs))\n else:\n break\n return walk\n\n def simulate_walks(self, 
num_walks, walk_length):\n '''\n Repeatedly simulate random walks from each node.\n '''\n G = self.graph.G\n walks = []\n nodes = list(G.nodes())\n print('Simulate walk iteration:')\n for walk_iter in range(num_walks):\n # pool = multiprocessing.Pool(processes = 4)\n print(str(walk_iter + 1), '/', str(num_walks))\n random.shuffle(nodes)\n for node in nodes:\n # walks.append(pool.apply_async(deepwalk_walk_wrapper, (self, walk_length, node, )))\n walks.append(self.deepwalk_walk(\n walk_length=walk_length, start_node=node))\n # pool.close()\n # pool.join()\n # print(len(walks))\n return walks\n\n def create_degree(self):\n G = self.graph.G\n print(\"- Creating degree vectors...\")\n degrees = {}\n degrees_sorted = set()\n degree_permuted = {}\n for v in G.nodes():\n degree = G.degree(v)\n degrees_sorted.add(degree)\n degree_permuted[v] = degree\n if (degree not in degrees):\n degrees[degree] = {}\n degrees[degree]['vertices'] = []\n degrees[degree]['vertices'].append(v)\n degrees_sorted = np.array(list(degrees_sorted), dtype='int')\n # degree_permuted = degrees_sorted\n degrees_sorted = np.sort(degrees_sorted)\n l = len(degrees_sorted)\n for index, degree in enumerate(degrees_sorted):\n if (index > 0):\n degrees[degree]['before'] = degrees_sorted[index - 1]\n if (index < (l - 1)):\n degrees[degree]['after'] = degrees_sorted[index + 1]\n print(\"- Degree vectors created.\")\n return degrees, degree_permuted\n\n def create_ppr_sample_table(self):\n print(\"- Creating PPR sample table ...\")\n nodes = list(self.graph.G.nodes())\n degree_neighbors = {}\n norm_weight = {}\n nodes_num = len(nodes)\n k = 0\n for node in nodes:\n print(str(k + 1), '/', str(nodes_num))\n k += 1\n degree_neighbors[node] = self.get_vertices(node)\n norm_weight[node] = self.ppr_sample(node, degree_neighbors[node])\n print(\"- PPR sample table created.\")\n return degree_neighbors, norm_weight\n\n def cost(self, a, b):\n ep = 0.001\n m = max(a, b) + ep\n mi = min(a, b) + ep\n return ((m / mi) - 
1)\n\n def ppr_sample(self, node, neighbors):\n node_ppr_v = [i[1] for i in self.ppr_matrix[node].most_common()]#[1:]\n if len(node_ppr_v) == 0:\n node_ppr_v = [1]\n sim_list = []\n nodes_num = len(self.graph.G.nodes())\n for _neighbor in neighbors:\n neighbor_ppr_v = [i[1] for i in self.ppr_matrix[_neighbor].most_common()]#[1:]\n if len(neighbor_ppr_v) == 0:\n neighbor_ppr_v = [1]\n dits_dtw, _ = fastdtw(node_ppr_v, neighbor_ppr_v, radius=1, dist=self.cost)\n sim_list.append(np.exp(-1.0 * dits_dtw))\n\n norm_weight = [float(i) / sum(sim_list) for i in sim_list]\n # sampled_neighbor = np.random.choice(neighbors, p=norm_weight)\n return norm_weight\n\n def verifyDegrees(self, degree_v_root, degree_a, degree_b):\n\n if (degree_b == -1):\n degree_now = degree_a\n elif (degree_a == -1):\n degree_now = degree_b\n elif (abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root)):\n degree_now = degree_b\n else:\n degree_now = degree_a\n\n return degree_now\n\n def get_vertices(self, v):\n num_seleted = 2 * math.log(len(self.graph.G.nodes()), 2)\n vertices = []\n\n degree_v = self.graph.G.degree(v)\n\n try:\n c_v = 0\n\n for v2 in self.degrees[degree_v]['vertices']:\n if (v != v2):\n vertices.append(v2)\n c_v += 1\n if (c_v > num_seleted):\n raise StopIteration\n\n if ('before' not in self.degrees[degree_v]):\n degree_b = -1\n else:\n degree_b = self.degrees[degree_v]['before']\n if ('after' not in self.degrees[degree_v]):\n degree_a = -1\n else:\n degree_a = self.degrees[degree_v]['after']\n if (degree_b == -1 and degree_a == -1):\n raise StopIteration\n degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)\n\n while True:\n for v2 in self.degrees[degree_now]['vertices']:\n if (v != v2):\n vertices.append(v2)\n c_v += 1\n if (c_v > num_seleted):\n raise StopIteration\n\n if (degree_now == degree_b):\n if ('before' not in self.degrees[degree_b]):\n degree_b = -1\n else:\n degree_b = self.degrees[degree_b]['before']\n else:\n if ('after' not in 
self.degrees[degree_a]):\n degree_a = -1\n else:\n degree_a = self.degrees[degree_a]['after']\n\n if (degree_b == -1 and degree_a == -1):\n raise StopIteration\n\n degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)\n\n except StopIteration:\n return list(vertices)\n\n return list(vertices)\n\n def save_embeddings(self, filename):\n fout = open(filename, 'w')\n node_num = len(self.vectors.keys())\n fout.write(\"{} {}\\n\".format(node_num, self.size))\n for node, vec in self.vectors.items():\n fout.write(\"{} {}\\n\".format(node,\n ' '.join([str(x) for x in vec])))\n fout.close()\n\n def save_results(self, filename, method, ratio, result):\n fout = open(filename, 'w')\n node_num = len(self.vectors)\n fout.write(\"{} {} {} \\n\".format(method, ratio, result))\n fout.close()\n\n" ]
[ [ "numpy.sort", "numpy.random.rand", "numpy.exp", "numpy.random.choice" ] ]
strawberrypie/jina-hub
[ "8b2356d58687694d817881c840745214f12e94c4", "8b2356d58687694d817881c840745214f12e94c4" ]
[ "crafters/image/ImageNormalizer/__init__.py", "crafters/image/ImageReader/tests/test_imagereader.py" ]
[ "__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nfrom typing import Tuple, Dict, Union, Iterable\n\nimport numpy as np\n\nfrom jina.executors.decorators import single\nfrom jina.executors.crafters import BaseCrafter\n\nfrom .helper import _load_image, _move_channel_axis, _crop_image, _resize_short\n\n\nclass ImageNormalizer(BaseCrafter):\n \"\"\"\n Normalize the image.\n\n :class:`ImageNormalizer` works on doc-level,\n it receives values of file names on the\n doc-level and returns image matrix on the chunk-level\n\n :param target_size: Desired output size. If size is a sequence\n like (h, w), the output size will be matched to this.\n If size is an int, the smaller edge of the image will be matched\n to this number maintaining the aspect ratio.\n :param img_mean: The mean of the images in `RGB` channels.\n Set to `[0.485, 0.456, 0.406]` for the models trained\n on `imagenet` with pytorch backbone.\n :param img_std: the std of the images in `RGB` channels.\n Set to `[0.229, 0.224, 0.225]` for the models trained\n on `imagenet` with pytorch backbone.\n :param resize_dim: the size of images' height and width to be resized to.\n The images are resized before cropping to the output size\n :param channel_axis: the axis id of the color channel,\n ``-1`` indicates the color channel info at the last axis\n \"\"\"\n\n def __init__(self,\n target_size: Union[Iterable[int], int] = 224,\n img_mean: Tuple[float] = (0, 0, 0),\n img_std: Tuple[float] = (1, 1, 1),\n resize_dim: int = 256,\n channel_axis: int = -1,\n *args,\n **kwargs):\n \"\"\"Set Constructor.\"\"\"\n super().__init__(*args, **kwargs)\n if isinstance(target_size, int):\n self.target_size = target_size\n elif isinstance(target_size, Iterable):\n self.target_size = tuple(target_size)\n else:\n raise ValueError(f'target_size {target_size} should be an integer or tuple/list of 2 integers')\n self.resize_dim = resize_dim\n self.img_mean = 
np.array(img_mean).reshape((1, 1, 3))\n self.img_std = np.array(img_std).reshape((1, 1, 3))\n self.channel_axis = channel_axis\n\n @single\n def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:\n \"\"\"\n Normalize the image.\n\n :param blob: the ndarray of the image with the color channel at the last axis\n :return: a chunk dict with the normalized image\n \"\"\"\n raw_img = _load_image(blob, self.channel_axis)\n _img = self._normalize(raw_img)\n img = _move_channel_axis(_img, -1, self.channel_axis)\n return dict(offset=0, blob=img)\n\n def _normalize(self, img):\n img = _resize_short(img, target_size=self.resize_dim)\n img, _, _ = _crop_image(img, target_size=self.target_size, how='center')\n img = np.array(img).astype('float32')/255\n img -= self.img_mean\n img /= self.img_std\n return img\n", "import io\n\nimport numpy as np\nfrom PIL import Image\n\nfrom .. import ImageReader\n\n\ndef create_test_image(output_fn, size_width=50, size_height=50):\n from PIL import Image\n image = Image.new('RGB', size=(size_width, size_height), color=(155, 0, 0))\n with open(output_fn, \"wb\") as f:\n image.save(f, 'jpeg')\n\n\ndef test_io_uri():\n crafter = ImageReader()\n tmp_fn = crafter.get_file_from_workspace('test.jpeg')\n img_size = 50\n create_test_image(tmp_fn, size_width=img_size, size_height=img_size)\n test_docs = crafter.craft([None, None], np.stack([tmp_fn, tmp_fn]))\n assert len(test_docs) == 2\n for test_doc in test_docs:\n assert test_doc['blob'].shape == (img_size, img_size, 3)\n\n\ndef test_io_buffer():\n crafter = ImageReader()\n tmp_fn = crafter.get_file_from_workspace('test.jpeg')\n img_size = 50\n create_test_image(tmp_fn, size_width=img_size, size_height=img_size)\n image_buffer = io.BytesIO()\n img = Image.open(tmp_fn)\n img.save(image_buffer, format='PNG')\n image_buffer.seek(0)\n test_docs = crafter.craft(np.stack([image_buffer.getvalue(), image_buffer.getvalue()]), [None, None])\n assert len(test_docs) == 2\n for test_doc in test_docs:\n 
assert test_doc['blob'].shape == (img_size, img_size, 3)\n np.testing.assert_almost_equal(test_doc['blob'], np.array(img).astype('float32'))\n" ]
[ [ "numpy.array" ], [ "numpy.stack", "numpy.array" ] ]
qiguming/mmdetection
[ "68532eb6f4643ddf0179a4384c8c9e004a2c1d07" ]
[ "mmdet/ops/point_sample.py" ]
[ "# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair\n\n\ndef normalize(grid):\n \"\"\"Normalize input grid from [-1, 1] to [0, 1]\n\n Args:\n grid (Tensor): The grid to be normalize, range [-1, 1].\n\n Returns:\n Tensor: Normalized grid, range [0, 1].\n \"\"\"\n\n return (grid + 1.0) / 2.0\n\n\ndef denormalize(grid):\n \"\"\"Denormalize input grid from range [0, 1] to [-1, 1]\n Args:\n grid (Tensor): The grid to be denormalize, range [0, 1].\n\n Returns:\n Tensor: Denormalized grid, range [-1, 1].\n \"\"\"\n\n return grid * 2.0 - 1.0\n\n\ndef generate_grid(num_grid, size, device):\n \"\"\"Generate regular square grid of points in [0, 1] x [0, 1] coordinate\n space.\n\n Args:\n num_grid (int): The number of grids to sample, one for each region.\n size (tuple(int, int)): The side size of the regular grid.\n device (torch.device): Desired device of returned tensor.\n\n Returns:\n (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that\n contains coordinates for the regular grids.\n \"\"\"\n\n affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)\n grid = F.affine_grid(\n affine_trans, torch.Size((1, 1, *size)), align_corners=False)\n grid = normalize(grid)\n return grid.view(1, -1, 2).expand(num_grid, -1, -1)\n\n\ndef rel_roi_point_to_abs_img_point(rois, rel_roi_points):\n \"\"\"Convert roi based relative point coordinates to image based absolute\n point coordinates.\n\n Args:\n rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)\n rel_roi_points (Tensor): Point coordinates inside RoI, relative to\n RoI, location, range (0, 1), shape (N, P, 2)\n\n Returns:\n Tensor: Image based absolute point coordinates, shape (N, P, 2)\n \"\"\"\n\n with torch.no_grad():\n assert rel_roi_points.size(0) == rois.size(0)\n assert rois.dim() == 2\n assert rel_roi_points.dim() == 3\n 
assert rel_roi_points.size(2) == 2\n # remove batch idx\n if rois.size(1) == 5:\n rois = rois[:, 1:]\n abs_img_points = rel_roi_points.clone()\n abs_img_points[:, :, 0] = abs_img_points[:, :, 0] * (\n rois[:, None, 2] - rois[:, None, 0])\n abs_img_points[:, :, 1] = abs_img_points[:, :, 1] * (\n rois[:, None, 3] - rois[:, None, 1])\n abs_img_points[:, :, 0] += rois[:, None, 0]\n abs_img_points[:, :, 1] += rois[:, None, 1]\n return abs_img_points\n\n\ndef abs_img_point_to_rel_img_point(abs_img_points,\n img_shape,\n spatial_scale=1.):\n \"\"\"Convert image based absolute point coordinates to image based relative\n coordinates for sampling.\n\n Args:\n abs_img_points (Tensor): Image based absolute point coordinates,\n shape (N, P, 2)\n img_shape (tuple): (height, width) of image or feature map.\n spatial_scale (float): Scale points by this factor. Default: 1.\n\n Returns:\n Tensor: Image based relative point coordinates for sampling,\n shape (N, P, 2)\n \"\"\"\n\n assert isinstance(img_shape, tuple) and len(img_shape) == 2\n h, w = img_shape\n scale = torch.tensor([w, h],\n dtype=torch.float,\n device=abs_img_points.device)\n scale = scale.view(1, 1, 2)\n rel_img_points = abs_img_points / scale * spatial_scale\n\n return rel_img_points\n\n\ndef rel_roi_point_to_rel_img_point(rois,\n rel_roi_points,\n img_shape,\n spatial_scale=1.):\n \"\"\"Convert roi based relative point coordinates to image based absolute\n point coordinates.\n\n Args:\n rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)\n rel_roi_points (Tensor): Point coordinates inside RoI, relative to\n RoI, location, range (0, 1), shape (N, P, 2)\n img_shape (tuple): (height, width) of image or feature map.\n spatial_scale (float): Scale points by this factor. 
Default: 1.\n\n Returns:\n Tensor: Image based relative point coordinates for sampling,\n shape (N, P, 2)\n \"\"\"\n\n abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points)\n rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img_shape,\n spatial_scale)\n\n return rel_img_point\n\n\ndef point_sample(input, points, align_corners=False, **kwargs):\n \"\"\"A wrapper around :function:`grid_sample` to support 3D point_coords\n tensors Unlike :function:`torch.nn.functional.grid_sample` it assumes\n point_coords to lie inside [0, 1] x [0, 1] square.\n\n Args:\n input (Tensor): Feature map, shape (N, C, H, W).\n points (Tensor): Image based absolute point coordinates (normalized),\n range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).\n align_corners (bool): Whether align_corners. Default: False\n\n Returns:\n Tensor: Features of `point` on `input`, shape (N, C, P) or\n (N, C, Hgrid, Wgrid).\n \"\"\"\n\n add_dim = False\n if points.dim() == 3:\n add_dim = True\n points = points.unsqueeze(2)\n output = F.grid_sample(\n input, denormalize(points), align_corners=align_corners, **kwargs)\n if add_dim:\n output = output.squeeze(3)\n return output\n\n\nclass SimpleRoIAlign(nn.Module):\n\n def __init__(self, out_size, spatial_scale, aligned=True):\n \"\"\"Simple RoI align in PointRend, faster than standard RoIAlign.\n\n Args:\n out_size (tuple[int]): h, w\n spatial_scale (float): scale the input boxes by this number\n aligned (bool): if False, use the legacy implementation in\n MMDetection, align_corners=True will be used in F.grid_sample.\n If True, align the results more perfectly.\n \"\"\"\n\n super(SimpleRoIAlign, self).__init__()\n self.out_size = _pair(out_size)\n self.spatial_scale = float(spatial_scale)\n # to be consistent with other RoI ops\n self.use_torchvision = False\n self.aligned = aligned\n\n def forward(self, features, rois):\n\n num_imgs = features.size(0)\n num_rois = rois.size(0)\n rel_roi_points = generate_grid(\n 
num_rois, self.out_size, device=rois.device)\n\n point_feats = []\n for batch_ind in range(num_imgs):\n # unravel batch dim\n feat = features[batch_ind].unsqueeze(0)\n inds = (rois[:, 0].long() == batch_ind)\n if inds.any():\n rel_img_points = rel_roi_point_to_rel_img_point(\n rois[inds], rel_roi_points[inds], feat.shape[2:],\n self.spatial_scale).unsqueeze(0)\n point_feat = point_sample(\n feat, rel_img_points, align_corners=not self.aligned)\n point_feat = point_feat.squeeze(0).transpose(0, 1)\n point_feats.append(point_feat)\n\n channels = features.size(1)\n roi_feats = torch.cat(point_feats, dim=0)\n roi_feats = roi_feats.reshape(num_rois, channels, *self.out_size)\n\n return roi_feats\n\n def __repr__(self):\n format_str = self.__class__.__name__\n format_str += '(out_size={}, spatial_scale={}'.format(\n self.out_size, self.spatial_scale)\n return format_str\n" ]
[ [ "torch.Size", "torch.no_grad", "torch.tensor", "torch.cat", "torch.nn.modules.utils._pair" ] ]
dylanhross/dmccs
[ "8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3" ]
[ "prediction/bimod/qglc/cactus_ccs_comp.py" ]
[ "#!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3\n\"\"\"\n\"\"\"\n\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# set up plot fonts\nfrom matplotlib import rcParams\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Helvetica', 'Arial']\nrcParams['font.size'] = 11\n\n\n# common settings for the same style across plots\nf_size = (3.5, 4)\nbs = {\n 'linewidth': 1., 'align': 'center', 'width': 0.75, 'capstyle': 'round', 'capsize': 6, \n 'error_kw': {\n 'elinewidth': 1., 'ecolor': 'k'\n }\n}\n\"\"\"\nbs = {\n 'fill': False, 'linewidth': 2, 'align': 'center', 'width': 0.8, 'capstyle': 'round', 'capsize': 6, 'hatch': '//'\n}\n\"\"\"\n\n\nfig = plt.figure(figsize=f_size)\n\nax = fig.add_subplot(111)\nax.axhline(202.8, c='k', ls=':', lw=1.5)\nax.axhline(209.5, c='k', ls=':', lw=1.5)\n\nlabels = ['7', '5', '3', \"3'\", \"4'\"]\nx = [3, 4, 5, 2, 1]\nccs = np.loadtxt('cactus_y.txt')\nec = ['orchid', 'yellow', 'lightpink', 'royalblue', 'darkorange']\n\nfor x_, ccs_, ec_ in zip(x, ccs, ec):\n ax.bar(x_, ccs_, edgecolor=ec_, ecolor=ec_, color=ec_, fill=True, **bs)\n ax.bar(x_, ccs_, fill=False, **bs)\n\nax.set_xticks([1, 2, 3, 4, 5])\nax.set_xticklabels(labels, fontstyle='italic', fontsize=14)\n\n\n\n#ax.set_xlim([150, 400])\nax.set_ylim([195, 215])\n\n\nfor d in ['top', 'right']:\n ax.spines[d].set_visible(False)\nax.set_ylabel(r'CCS ($\\AA^2$)')\n#ax.set_xlabel('m/z')\n\n# save figure\npng = 'qglc_cactus_comp.png'\nplt.savefig(png, dpi=400, bbox_inches='tight')\n\nplt.tight_layout()\n#plt.show()\n#plt.close()\n\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "numpy.loadtxt", "matplotlib.pyplot.savefig" ] ]
Xudewang/scarlet
[ "1d2a1806038cda8ac96e4c766a5cfa0b8ae5c1b7" ]
[ "scarlet/display.py" ]
[ "import numpy as np\nfrom astropy.visualization.lupton_rgb import LinearMapping, AsinhMapping\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle, Polygon\nfrom matplotlib.ticker import MaxNLocator\nfrom .bbox import Box\nfrom .component import Component\n\n\ndef channels_to_rgb(channels):\n \"\"\"Get the linear mapping of multiple channels to RGB channels\n The mapping created here assumes the the channels are ordered in wavelength\n direction, starting with the shortest wavelength. The mapping seeks to produce\n a relatively even weights for across all channels. It does not consider e.g.\n signal-to-noise variations across channels or human perception.\n Parameters\n ----------\n channels: int in range(0,7)\n Number of channels\n Returns\n -------\n array (3, channels) to map onto RGB\n \"\"\"\n assert channels in range(\n 0, 8\n ), \"No mapping has been implemented for more than {} channels\".format(channels)\n\n channel_map = np.zeros((3, channels))\n if channels == 1:\n channel_map[0, 0] = channel_map[1, 0] = channel_map[2, 0] = 1\n elif channels == 2:\n channel_map[0, 1] = 0.667\n channel_map[1, 1] = 0.333\n channel_map[1, 0] = 0.333\n channel_map[2, 0] = 0.667\n channel_map /= 0.667\n elif channels == 3:\n channel_map[0, 2] = 1\n channel_map[1, 1] = 1\n channel_map[2, 0] = 1\n elif channels == 4:\n channel_map[0, 3] = 1\n channel_map[0, 2] = 0.333\n channel_map[1, 2] = 0.667\n channel_map[1, 1] = 0.667\n channel_map[2, 1] = 0.333\n channel_map[2, 0] = 1\n channel_map /= 1.333\n elif channels == 5:\n channel_map[0, 4] = 1\n channel_map[0, 3] = 0.667\n channel_map[1, 3] = 0.333\n channel_map[1, 2] = 1\n channel_map[1, 1] = 0.333\n channel_map[2, 1] = 0.667\n channel_map[2, 0] = 1\n channel_map /= 1.667\n elif channels == 6:\n channel_map[0, 5] = 1\n channel_map[0, 4] = 0.667\n channel_map[0, 3] = 0.333\n channel_map[1, 4] = 0.333\n channel_map[1, 3] = 0.667\n channel_map[1, 2] = 0.667\n channel_map[1, 1] = 0.333\n channel_map[2, 2] = 
0.333\n channel_map[2, 1] = 0.667\n channel_map[2, 0] = 1\n channel_map /= 2\n elif channels == 7:\n channel_map[:, 6] = 2/3.\n channel_map[0, 5] = 1\n channel_map[0, 4] = 0.667\n channel_map[0, 3] = 0.333\n channel_map[1, 4] = 0.333\n channel_map[1, 3] = 0.667\n channel_map[1, 2] = 0.667\n channel_map[1, 1] = 0.333\n channel_map[2, 2] = 0.333\n channel_map[2, 1] = 0.667\n channel_map[2, 0] = 1\n channel_map /= 2\n return channel_map\n\n\nclass LinearPercentileNorm(LinearMapping):\n def __init__(self, img, percentiles=[1, 99]):\n \"\"\"Create norm that is linear between lower and upper percentile of img\n Parameters\n ----------\n img: array_like\n Image to normalize\n percentile: array_like, default=[1,99]\n Lower and upper percentile to consider. Pixel values below will be\n set to zero, above to saturated.\n \"\"\"\n assert len(percentiles) == 2\n vmin, vmax = np.percentile(img, percentiles)\n super().__init__(minimum=vmin, maximum=vmax)\n\n\nclass AsinhPercentileNorm(AsinhMapping):\n def __init__(self, img, percentiles=[1, 99]):\n \"\"\"Create norm that is linear between lower and upper percentile of img\n Parameters\n ----------\n img: array_like\n Image to normalize\n percentile: array_like, default=[1,99]\n Lower and upper percentile to consider. 
Pixel values below will be\n set to zero, above to saturated.\n \"\"\"\n assert len(percentiles) == 2\n vmin, vmax = np.percentile(img, percentiles)\n # solution for beta assumes flat spectrum at vmax\n stretch = vmax - vmin\n beta = stretch / np.sinh(1)\n super().__init__(minimum=vmin, stretch=stretch, Q=beta)\n\n\ndef img_to_3channel(img, channel_map=None, fill_value=0):\n \"\"\"Convert multi-band image cube into 3 RGB channels\n Parameters\n ----------\n img: array_like\n This should be an array with dimensions (channels, height, width).\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n fill_value: float, default=`0`\n Value to use for any masked pixels.\n Returns\n -------\n RGB: numpy array with dtype float\n \"\"\"\n # expand single img into cube\n assert len(img.shape) in [2, 3]\n if len(img.shape) == 2:\n ny, nx = img.shape\n img_ = img.reshape(1, ny, nx)\n elif len(img.shape) == 3:\n img_ = img\n C = len(img_)\n\n # filterWeights: channel x band\n if channel_map is None:\n channel_map = channels_to_rgb(C)\n else:\n assert channel_map.shape == (3, len(img))\n\n # map channels onto RGB channels\n _, ny, nx = img_.shape\n rgb = np.dot(channel_map, img_.reshape(C, -1)).reshape(3, ny, nx)\n\n if hasattr(rgb, \"mask\"):\n rgb = rgb.filled(fill_value)\n\n return rgb\n\n\ndef img_to_rgb(img, channel_map=None, fill_value=0, norm=None, mask=None):\n \"\"\"Convert images to normalized RGB.\n If normalized values are outside of the range [0..255], they will be\n truncated such as to preserve the corresponding color.\n Parameters\n ----------\n img: array_like\n This should be an array with dimensions (channels, height, width).\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n fill_value: float, default=`0`\n Value to use for any masked pixels.\n norm: `scarlet.display.Norm`, default `None`\n Norm to use for mapping in the allowed range [0..255]. 
If `norm=None`,\n `scarlet.display.LinearPercentileNorm` will be used.\n mask: array_like\n A [0,1] binary mask to apply over the top of the image,\n where pixels with mask==1 are masked out.\n Returns\n -------\n rgb: numpy array with dimensions (3, height, width) and dtype uint8\n \"\"\"\n RGB = img_to_3channel(img, channel_map=channel_map)\n if norm is None:\n norm = LinearMapping(image=RGB)\n rgb = norm.make_rgb_image(*RGB)\n if mask is not None:\n rgb = np.dstack([rgb, ~mask * 255])\n return rgb\n\n\npanel_size = 4.0\n\n\ndef show_likelihood(blend, figsize=None, **kwargs):\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax.plot(blend.log_likelihood, **kwargs)\n ax.set_xlabel(\"Iteration\")\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_ylabel(\"log-Likelihood\")\n return fig\n\n\ndef show_observation(\n observation,\n norm=None,\n channel_map=None,\n sky_coords=None,\n show_psf=False,\n add_labels=True,\n figsize=None,\n):\n \"\"\"Plot observation in standardized form.\n \"\"\"\n panels = 1 if show_psf is False else 2\n if figsize is None:\n figsize = (panel_size * panels, panel_size)\n fig, ax = plt.subplots(1, panels, figsize=figsize)\n if not hasattr(ax, \"__iter__\"):\n ax = (ax,)\n\n # Mask any pixels with zero weight in all bands\n mask = np.sum(observation.weights, axis=0) == 0\n # if there are no masked pixels, do not use a mask\n if np.all(mask == 0):\n mask = None\n\n panel = 0\n extent = get_extent(observation.bbox)\n ax[panel].imshow(\n img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Observation\")\n\n if add_labels:\n assert sky_coords is not None, \"Provide sky_coords for labeled objects\"\n\n for k, center in enumerate(sky_coords):\n center_ = observation.get_pixel(center)\n color = \"w\" if observation.C > 1 else \"r\"\n ax[panel].text(*center_[::-1], k, color=color, ha=\"center\", va=\"center\")\n\n panel += 1\n if show_psf:\n 
psf_image = np.zeros(observation.data.shape)\n\n if observation.psf is not None:\n psf_model = observation.psf.get_model()\n # make PSF as bright as the brightest pixel of the observation\n psf_model *= (\n observation.data.mean(axis=0).max() / psf_model.mean(axis=0).max()\n )\n # insert into middle of \"blank\" observation\n full_box = Box(psf_image.shape)\n shift = tuple(\n psf_image.shape[c] // 2 - psf_model.shape[c] // 2\n for c in range(full_box.D)\n )\n model_box = Box(psf_model.shape) + shift\n model_box.insert_into(psf_image, psf_model)\n # slices = scarlet.box.overlapped_slices\n ax[panel].imshow(img_to_rgb(psf_image, norm=norm), origin=\"lower\")\n ax[panel].set_title(\"PSF\")\n\n fig.tight_layout()\n return fig\n\n\ndef show_scene(\n sources,\n observation=None,\n norm=None,\n channel_map=None,\n show_model=True,\n show_observed=False,\n show_rendered=False,\n show_residual=False,\n add_labels=True,\n add_boxes=False,\n figsize=None,\n linear=True,\n):\n \"\"\"Plot all sources to recreate the scence.\n The functions provides a fast way of evaluating the quality of the entire model,\n i.e. 
the combination of all scences that seek to fit the observation.\n Parameters\n ----------\n sources: list of source models\n observation: `~scarlet.Observation`\n norm: norm to compress image intensity to the range [0,255]\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n show_model: bool\n Whether the model is shown in the model frame\n show_observed: bool\n Whether the observation is shown\n show_rendered: bool\n Whether the model, rendered to match the observation, is shown\n show_residual: bool\n Whether the residuals between rendered model and observation is shown\n add_label: bool\n Whether each source is labeled with its numerical index in the source list\n add_boxes: bool\n Whether each source box is shown\n figsize: matplotlib figsize argument\n linear: bool\n Whether or not to display the scene in a single line (`True`) or\n on multiple lines (`False`).\n Returns\n -------\n matplotlib figure\n \"\"\"\n if show_observed or show_rendered or show_residual:\n assert (\n observation is not None\n ), \"Provide matched observation to show observed frame\"\n\n panels = sum((show_model, show_observed, show_rendered, show_residual))\n if linear:\n if figsize is None:\n figsize = (panel_size * panels, panel_size)\n fig, ax = plt.subplots(1, panels, figsize=figsize)\n else:\n columns = int(np.ceil(panels / 2))\n if figsize is None:\n figsize = (panel_size * columns, panel_size * 2)\n fig = plt.figure(figsize=figsize)\n ax = [fig.add_subplot(2, columns, n + 1) for n in range(panels)]\n if not hasattr(ax, \"__iter__\"):\n ax = (ax,)\n\n # Mask any pixels with zero weight in all bands\n if observation is not None:\n mask = np.sum(observation.weights, axis=0) == 0\n # if there are no masked pixels, do not use a mask\n if np.all(mask == 0):\n mask = None\n\n model_frame = sources[0].frame\n model = np.zeros(model_frame.shape)\n for src in sources:\n model += src.get_model(frame=model_frame)\n\n panel = 0\n if show_model:\n extent = 
get_extent(model_frame.bbox)\n ax[panel].imshow(\n img_to_rgb(model, norm=norm, channel_map=channel_map),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Model\")\n panel += 1\n\n if show_rendered or show_residual:\n model = observation.render(model)\n extent = get_extent(observation.bbox)\n\n if show_rendered:\n ax[panel].imshow(\n img_to_rgb(model, norm=norm, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Model Rendered\")\n panel += 1\n\n if show_observed:\n ax[panel].imshow(\n img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Observation\")\n panel += 1\n\n if show_residual:\n residual = observation.data - model\n norm_ = LinearPercentileNorm(residual)\n ax[panel].imshow(\n img_to_rgb(residual, norm=norm_, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Residual\")\n panel += 1\n\n for k, src in enumerate(sources):\n if add_boxes:\n panel = 0\n box_kwargs = {\"facecolor\": \"none\", \"edgecolor\": \"w\", \"lw\": 0.5}\n if show_model:\n extent = get_extent(src.bbox)\n rect = Rectangle(\n (extent[0], extent[2]),\n extent[1] - extent[0],\n extent[3] - extent[2],\n **box_kwargs\n )\n ax[panel].add_artist(rect)\n panel = 1\n if observation is not None:\n start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]\n points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))\n coords = [\n observation.get_pixel(model_frame.get_sky_coord(p)) for p in points\n ]\n for panel in range(panel, panels):\n poly = Polygon(coords, closed=True, **box_kwargs)\n ax[panel].add_artist(poly)\n\n if add_labels and hasattr(src, \"center\") and src.center is not None:\n center = src.center\n panel = 0\n if show_model:\n ax[panel].text(*center[::-1], k, color=\"w\", ha=\"center\", va=\"center\")\n panel = 1\n if observation is not None:\n center_ = 
observation.get_pixel(model_frame.get_sky_coord(center))\n for panel in range(panel, panels):\n ax[panel].text(\n *center_[::-1], k, color=\"w\", ha=\"center\", va=\"center\"\n )\n\n fig.tight_layout()\n return fig\n\n\ndef get_extent(bbox):\n return [bbox.start[-1], bbox.stop[-1], bbox.start[-2], bbox.stop[-2]]\n\n\ndef show_sources(\n sources,\n observation=None,\n norm=None,\n channel_map=None,\n show_model=True,\n show_observed=False,\n show_rendered=False,\n show_spectrum=True,\n figsize=None,\n model_mask=None,\n add_markers=True,\n add_boxes=False,\n):\n \"\"\"Plot each source individually.\n The functions provides an more detailed inspection of every source in the list.\n Parameters\n ----------\n sources: list of source models\n observation: `~scarlet.Observation`\n norm: norm to compress image intensity to the range [0,255]\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n show_model: bool\n Whether the model is shown in the model frame\n show_observed: bool\n Whether the observation is shown\n show_rendered: bool\n Whether the model, rendered to match the observation, is shown\n show_spectrum: bool\n Whether source specturm is shown.\n For multi-component sources, spectra are shown separately.\n figsize: matplotlib figsize argument\n model_mask: array\n Mask used to hide pixels in the model only.\n add_markers: bool\n Whether or not to mark the centers of the sources\n with their source number.\n add_boxes: bool\n Whether source boxes are shown\n Returns\n -------\n matplotlib figure\n \"\"\"\n if show_observed or show_rendered:\n assert (\n observation is not None\n ), \"Provide matched observation to show observed frame\"\n\n panels = sum((show_model, show_observed, show_rendered, show_spectrum))\n if figsize is None:\n figsize = (panel_size * panels, panel_size * len(list(sources)))\n fig, ax = plt.subplots(len(list(sources)), panels, figsize=figsize, squeeze=False)\n\n marker_kwargs = {\"mew\": 1, \"ms\": 10}\n box_kwargs = 
{\"facecolor\": \"none\", \"edgecolor\": \"w\", \"lw\": 0.5}\n\n for k, src in enumerate(sources):\n\n model_frame = src.frame\n\n if hasattr(src, \"center\") and src.center is not None:\n center = np.array(src.center)[::-1]\n else:\n center = None\n\n if add_boxes:\n start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]\n points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))\n box_coords = [\n observation.get_pixel(model_frame.get_sky_coord(p)) for p in points\n ]\n\n # model in its bbox\n panel = 0\n model = src.get_model()\n\n if show_model:\n # Show the unrendered model in it's bbox\n extent = get_extent(src.bbox)\n ax[k][panel].imshow(\n img_to_rgb(model, norm=norm, channel_map=channel_map, mask=model_mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[k][panel].set_title(\"Model Source {}\".format(k))\n if center is not None and add_markers:\n ax[k][panel].plot(*center, \"wx\", **marker_kwargs)\n panel += 1\n\n # model in observation frame\n if show_rendered:\n # Center and show the rendered model\n model_ = src.get_model(frame=model_frame)\n model_ = observation.render(model_)\n extent = get_extent(observation.bbox)\n ax[k][panel].imshow(\n img_to_rgb(model_, norm=norm, channel_map=channel_map),\n extent=extent,\n origin=\"lower\",\n )\n ax[k][panel].set_title(\"Model Source {} Rendered\".format(k))\n\n if center is not None and add_markers:\n center_ = observation.get_pixel(model_frame.get_sky_coord(center))\n ax[k][panel].plot(*center_, \"wx\", **marker_kwargs)\n if add_boxes:\n poly = Polygon(box_coords, closed=True, **box_kwargs)\n ax[k][panel].add_artist(poly)\n panel += 1\n\n if show_observed:\n # Center the observation on the source and display it\n _images = observation.data\n ax[k][panel].imshow(\n img_to_rgb(_images, norm=norm, channel_map=channel_map),\n extent=extent,\n origin=\"lower\",\n )\n ax[k][panel].set_title(\"Observation\".format(k))\n if center is not None and add_markers:\n center_ = 
observation.get_pixel(model_frame.get_sky_coord(center))\n ax[k][panel].plot(*center_, \"wx\", **marker_kwargs)\n if add_boxes:\n poly = Polygon(box_coords, closed=True, **box_kwargs)\n ax[k][panel].add_artist(poly)\n panel += 1\n\n if show_spectrum:\n # needs to be evaluated in the source box to prevent truncation\n if hasattr(src, \"__iter__\") and isinstance(src[0], Component):\n spectra = []\n for component in src:\n model_ = component.get_model()\n spectra.append(model_.sum(axis=(1, 2)))\n else:\n spectra = [model.sum(axis=(1, 2))]\n\n for spectrum in spectra:\n ax[k][panel].plot(spectrum)\n ax[k][panel].set_xticks(range(len(spectrum)))\n if hasattr(src.frame, \"channels\") and src.frame.channels is not None:\n ax[k][panel].set_xticklabels(src.frame.channels)\n ax[k][panel].set_title(\"Spectrum\")\n ax[k][panel].set_xlabel(\"Channel\")\n ax[k][panel].set_ylabel(\"Intensity\")\n\n fig.tight_layout()\n return fig\n" ]
[ [ "matplotlib.ticker.MaxNLocator", "numpy.sum", "numpy.ceil", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.sinh", "matplotlib.pyplot.subplots", "numpy.dstack", "numpy.all", "matplotlib.patches.Rectangle", "matplotlib.patches.Polygon", "numpy.array", "numpy.percentile" ] ]
jfrancis71/TensorFlowApps
[ "a9c61e2d5146c02715748221f51c656143b51b02" ]
[ "DownloadVGG_Faces.py" ]
[ "import os\nfrom PIL import Image\nimport urllib.request as ur\nimport urllib.request\nfrom io import BytesIO\nimport requests\nimport csv\nimport h5py\nimport numpy as np\nimport argparse\n\ndef retrieve_patch( rec ):\n response = requests.get( rec[1], timeout=10 )\n file = BytesIO( response.content )\n img = Image.open( file )\n ptch = img.crop( ( float(rec[2]),float(rec[3]),float(rec[4]), float(rec[5])) ).resize( (32,32) ).convert('L')\n return np.asarray( ptch, dtype=np.uint8 )\n\ndef retrieve_celeb( filename ):\n csvfile = open( filename, 'r')\n reader = csv.reader(csvfile, delimiter=' ')\n pts = []\n for row in reader:\n print( \"image = \", row[0] )\n if ( row[8] != '1' ):\n continue\n try:\n pt = retrieve_patch( row )\n pts.append( pt )\n except IOError as e:\n continue\n return pts\n\n#Parsing the command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-folder\",\n help=\"folder for the HDF5 file and subfolder files\")\nargs = parser.parse_args()\n\ncontent_list = os.listdir( os.path.join( args.folder, \"files\") )\n\ncelebs = []\nfor celeb in content_list[0:100]:\n print( \"Celeb\", celeb )\n pts = retrieve_celeb( os.path.join( args.folder, \"files\", celeb ) )\n celebs = celebs + pts\n\nfile = h5py.File( os.path.join( args.folder, \"dataset.hdf5\" ), 'w')\ndset = file.create_dataset(\"/patches\", data = celebs )\nfile.close()\n" ]
[ [ "numpy.asarray" ] ]
zaradana/Fast_BERT
[ "7ee96e99ba95468c29fe3542fe8071e0402ec0f6" ]
[ "0_fast_bert/prediction.py" ]
[ "import os\nimport torch\nfrom .data_cls import BertDataBunch\nfrom .data_ner import BertNERDataBunch\nfrom .learner_cls import BertLearner\nfrom .learner_ner import BertNERLearner\nimport time\n\nfrom transformers import AutoTokenizer\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\n\nclass BertClassificationPredictor(object):\n def __init__(\n self,\n model_path,\n label_path,\n multi_label=False,\n model_type=\"bert\",\n use_fast_tokenizer=True,\n do_lower_case=True,\n device=None,\n ):\n if device is None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n self.model_path = model_path\n self.label_path = label_path\n self.multi_label = multi_label\n self.model_type = model_type\n self.do_lower_case = do_lower_case\n self.device = device\n\n # Use auto-tokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.model_path, use_fast=use_fast_tokenizer\n )\n\n self.learner = self.get_learner()\n\n def get_learner(self):\n databunch = BertDataBunch(\n self.label_path,\n self.label_path,\n self.tokenizer,\n train_file=None,\n val_file=None,\n batch_size_per_gpu=32,\n max_seq_length=512,\n multi_gpu=False,\n multi_label=self.multi_label,\n model_type=self.model_type,\n no_cache=True,\n )\n\n learner = BertLearner.from_pretrained_model(\n databunch,\n self.model_path,\n metrics=[],\n device=self.device,\n logger=None,\n output_dir=None,\n warmup_steps=0,\n multi_gpu=False,\n is_fp16=False,\n multi_label=self.multi_label,\n logging_steps=0,\n )\n\n return learner\n\n def predict_batch(self, texts):\n return self.learner.predict_batch(texts)\n\n def predict(self, text):\n predictions = self.predict_batch([text])[0]\n return predictions\n\n\nclass BertNERPredictor(object):\n def __init__(\n self,\n model_path,\n label_path,\n model_type=\"bert\",\n use_fast_tokenizer=True,\n do_lower_case=True,\n 
device=None,\n ):\n if device is None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n self.model_path = model_path\n self.label_path = label_path\n self.model_type = model_type\n self.do_lower_case = do_lower_case\n self.device = device\n\n # Use auto-tokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.model_path, use_fast=use_fast_tokenizer\n )\n\n self.learner = self.get_learner()\n\n def get_learner(self):\n databunch = BertNERDataBunch(\n self.label_path,\n self.tokenizer,\n train_file=None,\n val_file=None,\n batch_size_per_gpu=32,\n max_seq_length=512,\n multi_gpu=False,\n model_type=self.model_type,\n no_cache=True,\n )\n\n learner = BertNERLearner.from_pretrained_model(\n databunch,\n self.model_path,\n device=self.device,\n logger=None,\n output_dir=None,\n warmup_steps=0,\n multi_gpu=False,\n is_fp16=False,\n logging_steps=0,\n )\n\n return learner\n\n def predict_batch(self, texts, group=True, exclude_entities=[\"O\"]):\n predictions = []\n\n for text in texts:\n pred = self.predict(text, group=group, exclude_entities=exclude_entities)\n if pred:\n predictions.append({\"text\": text, \"results\": pred})\n return predictions\n\n def predict(self, text, group=True, exclude_entities=[\"O\"]):\n predictions = self.learner.predict(\n text, group=group, exclude_entities=exclude_entities\n )\n return predictions\n" ]
[ [ "torch.cuda.is_available", "torch.device" ] ]