repo_name
stringlengths 6
130
| hexsha
sequence | file_path
sequence | code
sequence | apis
sequence | possible_versions
list |
---|---|---|---|---|---|
msadang/blazingsql | [
"5fe3e418dbee4a3961998b0e25ec81100a1a1490"
] | [
"tests/BlazingSQLTest/Runner/runTest.py"
] | [
"# Cast column to f64 before convert it to pandas\n# This is a hack, use the assert_equal comparator when nulls is\n# fully supported on cudf.sort_values\nimport json\nimport logging\nimport os\nimport re\nimport time\n\nimport blazingsql\nfrom blazingsql import DataType\n\n# import git\nimport numpy as np\nimport pandas as pd\n\nfrom BlazingLogging import loggingHandler as lhandler\nfrom Configuration import ExecutionMode\nfrom Configuration import Settings as Settings\nfrom DataBase import createSchema as cs\n\nif ((Settings.execution_mode == ExecutionMode.FULL and\n Settings.compare_res == \"true\") or\n Settings.execution_mode == ExecutionMode.GENERATOR):\n print(Settings.execution_mode)\n print(Settings.compare_res)\n from pydrill.client import PyDrill\n from pyspark.sql.session import SparkSession\n\nclass Result:\n def __init__(self, columns, resultSet, resultBlz):\n self.columns = columns\n self.resultSet = resultSet\n self.resultBlz = resultBlz\n\n\nname = \"blzlogging\"\n\nHANDLER = lhandler.logging_handler()\n\n\nclass loggerblz:\n def __init__(self, query, error, totaltime):\n self.query = query\n self.error = error\n self.totaltime = totaltime\n\n\nclass result:\n def __init__(self, res_execution, error):\n self.res_execution = res_execution\n self.error = error\n\n\ndef logginghelper(name):\n # logging.basicConfig(filename='example.txt',level=logging.DEBUG)\n logging._defaultFormatter = logging.Formatter()\n logger = logging.getLogger(name)\n logger.handlers = []\n logger.setLevel(logging.DEBUG)\n logger.addHandler(HANDLER)\n return logger\n\n\ndef loggingClose(name):\n HANDLER.log = []\n\n\ndef upcast_to_float(df):\n for name in df.columns:\n if np.issubdtype(df[name].dtype, np.bool_):\n df[name] = df[name].astype(np.float32)\n elif np.issubdtype(df[name].dtype, np.integer):\n df[name] = df[name].astype(np.float64)\n return df\n\n\ndef to_pandas_f64_engine(df, expected_types_list):\n count = 0\n for col in df.columns:\n if count >= 
len(expected_types_list):\n break\n\n if expected_types_list[count] != np.dtype(object):\n if df.shape[0] > 0:\n if not np.issubdtype(df[col].dtype, np.number) and not np.issubdtype(\n df[col].dtype, np.datetime64\n ):\n if np.issubdtype(expected_types_list[count], np.bool_):\n df[col] = (\n df[col].map({\"true\": 1.0, \"false\": 0.0}).astype(np.float32)\n )\n elif np.issubdtype(expected_types_list[count], np.datetime64):\n df[col] = df[col].astype(expected_types_list[count])\n else:\n df[col] = pd.to_numeric(df[col], errors=\"coerce\")\n count = count + 1\n\n return df\n\n\ndef get_null_constants(df):\n null_values = {}\n for col, dtype in df.dtypes.to_dict().items():\n if np.issubdtype(dtype, np.datetime64):\n null_values[col] = np.datetime64(\"nat\")\n elif np.issubdtype(dtype, np.number):\n null_values[col] = np.nan\n return null_values\n\n\ndef compare_results(pdf1, pdf2, acceptable_difference, use_percentage, engine):\n np.warnings.filterwarnings(\"ignore\")\n\n if pdf1.size == 0 and pdf2.size == 0:\n return \"Success\"\n\n msg = \"\"\n if not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n elif engine==\"drill\":\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n\n msg = \"\"\n if not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n elif engine==\"drill\":\n msg = \"PyDrill\"\n else:\n msg = \"PySpark\"\n\n if pdf1.shape[0] == pdf2.shape[0]:\n if pdf1.shape[1] == pdf2.shape[1]:\n\n for name in pdf1.columns:\n if pdf1[name].dtype == np.object:\n pdf1[name] = pdf1[name].astype('string')\n\n for name in pdf2.columns:\n if pdf2[name].dtype == np.object:\n pdf2[name] = pdf2[name].astype('string')\n\n # Removing indexes, because those are considered when\n # comparing with equals()\n pdf1.reset_index(drop=True, inplace=True)\n pdf2.reset_index(drop=True, inplace=True)\n\n # Make the column labels equal as equals() also compare labels\n 
orig_pdf2_labels = pdf2.columns.to_list()\n pdf2.columns = pdf1.columns.to_list()\n\n exac_comp = pdf1.select_dtypes(exclude=np.inexact).equals(\n pdf2.select_dtypes(exclude=np.inexact)\n )\n\n # Restore labels\n pdf2.columns = orig_pdf2_labels\n\n tmp_pdf1 = pdf1.select_dtypes(include=np.inexact)\n tmp_pdf2 = pdf2.select_dtypes(include=np.inexact)\n\n\n if use_percentage:\n relative_tolerance = acceptable_difference\n absolute_tolerance = 0\n else:\n relative_tolerance = 0\n absolute_tolerance = acceptable_difference\n # np.allclose follows this formula:\n # absolute(a - b) <= (absolute_tolerance + relative_tolerance * absolute(b))\n\n res = np.all(exac_comp) and np.allclose(\n tmp_pdf1.values, tmp_pdf2.values, relative_tolerance,\n absolute_tolerance, equal_nan=True\n )\n if res:\n return \"Success\"\n else:\n return \"Fail: Different values\"\n else:\n return (\n \"Fail: Different number of columns blzSQLresult: \"\n + str(pdf1.shape[1])\n + \" \"\n + msg\n + \" result: \"\n + str(pdf2.shape[1])\n )\n else:\n return (\n \"Fail: Different number of rows blzSQLresult: \"\n + str(pdf1.shape[0])\n + \" \"\n + msg\n + \" result: \"\n + str(pdf2.shape[0])\n )\n\n\ndef begins_with(col1, col2, exp):\n return col1.startswith(exp) or col2.startswith(exp)\n\n\ndef compare_column_names(pdf1, pdf2):\n if len(pdf1.columns) != len(pdf2.columns):\n if pdf1.values.size == 0 and pdf2.values.size == 0:\n return True\n print(\"Different set of columns\")\n return False\n for blzCol, drillCol in zip(\n pdf1.columns.values.tolist(), pdf2.columns.values.tolist()\n ):\n if blzCol != drillCol:\n if (\n begins_with(drillCol, blzCol, \"EXPR\") is False\n and begins_with(drillCol, blzCol, \"count(\") is False\n ):\n print(\"Different columns\")\n return False\n return True\n\n# NOTE kharoly percy william: NEVER CHANGE THE ORDER of these\n# lines (the logger logic depends that we log first queryType and then queryId\n# WARNING DO NOT CHANGE THE CALL ORDER IN THIS FUCTION!\n\n\ndef 
get_Branch():\n branch = blazingsql.__branch_name__\n return branch\n\n\ndef get_CommitHash():\n commit = blazingsql.__version__\n return commit\n\n\ndef get_QueryId(input_type, test_name, test_id):\n query_id = (\n str(input_type).upper()\n + \"-\"\n + str(get_codTest(test_name)).upper()\n + \"-\"\n + str(test_id)\n )\n return query_id\n\n\ndef get_resultId(resultComparisson):\n result_id = 1\n if resultComparisson != \"Success\":\n result_id = 0\n return result_id\n\n\ndef get_codTest(test_name):\n switcher = {\n \"Aggregations without group by\": \"AGGWOGRBY\",\n \"Coalesce\": \"COALESCE\",\n \"Column Basis\": \"COLBAS\",\n \"Bindable Alias\": \"BALIAS\",\n \"Boolean\": \"BOOL\",\n \"Case\": \"CASE\",\n \"Cast\": \"CAST\",\n \"Common Table Expressions\": \"COMTABLEX\",\n \"Concat\": \"CONCAT\",\n \"Count Distinct\": \"COUNTD\",\n \"Count without group by\": \"COUNTWOGRBY\",\n \"Cross join\": \"CROSSJOIN\",\n \"Date\": \"DATE\",\n \"DayOfWeek\": \"DAYOFWEEK\",\n \"Dir\": \"DIR\",\n \"File System Google Storage\": \"FSGS\",\n \"Hdfs FileSystem\": \"FSHDFS\",\n \"Hive FileSystem\": \"FSHIVE\",\n \"File System Local\": \"FSLOCAL\",\n \"File System S3\": \"FSS3\",\n \"Full outer join\": \"FOUTJOIN\",\n \"Group by\": \"GROUPBY\",\n \"Group by without aggregations\": \"GRBYWOAGG\",\n \"Inner join\": \"INNERJOIN\",\n \"Left outer join\": \"LOUTJOIN\",\n \"Like\": \"LIKE\",\n \"Literal\": \"LITERAL\",\n \"Nested Queries\": \"NESTEDQ\",\n \"Non-EquiJoin Queries\": \"NEQUIJOIN\",\n \"Order by\": \"ORDERBY\",\n \"Predicates With Nulls\": \"PREDWNULLS\",\n \"Round\": \"ROUND\",\n \"Replace\": \"REPLACE\",\n \"Simple Distribution From Local\": \"SIMPLEDIST\",\n \"Smiles Test\": \"SMILES\",\n \"Substring\": \"SUBSTRING\",\n \"Tables from Pandas\": \"TBLPANDAS\",\n \"Timestampdiff\": \"TIMESTAMPD\",\n \"Timestamp\": \"TIMESTAMP\",\n \"To_timestamp\": \"TO_TIMESTAMP\",\n \"TPCH Queries\": \"TPCH\",\n \"Config Options\": \"TPCH\", # we want the same outputs as the tpch test\n 
\"Unary ops\": \"UNARYOPS\",\n \"Unify Tables\": \"UNIFYTBL\",\n \"Union\": \"UNION\",\n \"Limit\": \"LIMIT\",\n \"Where clause\": \"WHERE\",\n \"Wild Card\": \"WILDCARD\",\n \"Simple String\": \"SSTRING\",\n \"String case\": \"STRINGCASE\",\n \"Message Validation\": \"MESSAGEVAL\"\n }\n\n return switcher.get(test_name)\n\ndef print_fixed_log(\n logger,\n test_name,\n input_type,\n test_id,\n sql,\n resultComparisson,\n error_message,\n load_time,\n engine_time,\n total_time,\n):\n commitHash = get_CommitHash()\n branchName = get_Branch()\n # dateNow=datetime.now()\n inputType = cs.get_extension(input_type)\n\n logger.info(get_QueryId(inputType, test_name, test_id)) # QueryID\n logger.info(Settings.dateNow) # TimeStamp\n logger.info(test_name) # TestGroup\n logger.info(inputType) # InputType\n logger.info(sql) # Query\n logger.info(get_resultId(resultComparisson)) # Result\n logger.info(error_message) # Error\n logger.info(branchName) # PR\n logger.info(commitHash) # CommitHash\n logger.info(Settings.data[\"RunSettings\"][\"nRals\"])\n logger.info(Settings.data[\"RunSettings\"][\"nGPUs\"])\n logger.info(Settings.data[\"TestSettings\"][\"dataDirectory\"])\n logger.info(test_id)\n logger.info(load_time)\n logger.info(engine_time)\n logger.info(total_time)\n\n\ndef print_query_results(\n sql,\n queryId,\n queryType,\n pdf1,\n pdf2,\n resultgdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n):\n if print_result:\n print(\"#BLZ:\")\n print(pdf1)\n if not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n print(\"#DRILL:\")\n else:\n print(\"#PYSPARK:\")\n print(pdf2)\n else:\n if engine==\"drill\":\n print(\"#DRILL:\")\n else:\n print(\"#PYSPARK:\")\n data_type = cs.get_extension(input_type)\n print(str(queryId) + \" Test \" + queryType + \" - \" + data_type)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n\n error_message = \"\"\n stringResult = \"\"\n\n compareResults = 
True\n if \"compare_results\" in Settings.data[\"RunSettings\"]:\n compareResults = Settings.data[\"RunSettings\"][\"compare_results\"]\n\n if compareResults:\n columnNamesComparison = compare_column_names(pdf1, pdf2)\n if columnNamesComparison is not True:\n print(\"Columns:\")\n print(pdf1.columns)\n print(pdf2.columns)\n\n error_message = \"Column names are not the same\"\n print(\"ERROR:\")\n print(error_message)\n\n resultComparisson = compare_results(\n pdf1, pdf2, acceptable_difference, use_percentage, engine\n )\n if resultComparisson != \"Success\":\n error_message = resultComparisson[6:]\n print(\"ERROR:\")\n print(error_message)\n\n stringResult = resultComparisson\n if resultComparisson != \"Success\" or columnNamesComparison is False:\n stringResult = \"Fail\"\n else:\n stringResult = \"Success\"\n print(stringResult)\n\n print(\"TOTAL TIME: \")\n print(total_time)\n print(\"CRASHED NODES: \")\n # print(resultgdf.n_crashed_nodes)\n print(\"TOTAL NODES: \")\n # print(resultgdf.total_nodes)\n print(\"===================================================\")\n\n logger = logginghelper(name)\n\n # TODO percy kharoly bindings we need to get the number from internal api\n # print_fixed_log(logger, queryType, queryId, sql, stringResult,\n # error_message, 1, 1, 2)\n print_fixed_log(\n logger,\n queryType,\n input_type,\n queryId,\n sql,\n stringResult,\n error_message,\n load_time,\n engine_time,\n total_time,\n )\n\ndef print_query_results2(sql, queryId, input_type, queryType, error_message, message_validation):\n print(queryId)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n result = validate_messages(error_message, message_validation)\n print(result)\n print(\"ERROR:\")\n if result==\"Fail\":\n print(error_message)\n else:\n error_message=\"\"\n print(\"CALCITE TIME: \")\n print(\"-\")\n print(\"RAL TIME: \")\n print(\"-\")\n print(\"EXECUTION TIME: \")\n print(\"-\")\n\n print(\"===================================================\")\n\n logger = 
logginghelper(name)\n\n print_fixed_log(\n logger, queryType, input_type, queryId, sql, result, error_message, None, None, None\n )\n\ndef print_query_results_performance(sql, queryId, queryType, resultgdf):\n print(queryId)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n resultComparisson = \"Success\"\n print(\"CALCITE TIME: \")\n print(resultgdf.calciteTime)\n print(\"RAL TIME: \")\n print(resultgdf.ralTime)\n print(\"EXECUTION TIME: \")\n print(resultgdf.totalTime)\n\n print(\"===================================================\")\n\n logger = logginghelper(name)\n\n print_fixed_log(\n logger,\n queryType,\n queryId,\n sql,\n resultComparisson,\n \" \",\n resultgdf.calciteTime,\n resultgdf.ralTime,\n resultgdf.totalTime,\n )\n\n\ndef print_query_results_dist(\n sql,\n queryId,\n queryType,\n pdf1,\n pdf2,\n resultgdf,\n acceptable_difference,\n use_percentage,\n print_result,\n):\n if print_result:\n print(\"#BLZ:\")\n print(pdf1)\n print(\"#DRILL:\")\n print(pdf2)\n print(queryId)\n print(\"#QUERY:\")\n print(sql)\n print(\"RESULT:\")\n resultComparisson = compare_results(\n pdf1.values, pdf2.values, acceptable_difference, use_percentage\n )\n error_message = \"\"\n if resultComparisson != \"Success\":\n error_message = resultComparisson[6:]\n resultComparisson = \"Fail\"\n print(resultComparisson)\n print(\"ERROR:\")\n print(error_message)\n else:\n print(resultComparisson)\n print(\"CALCITE TIME: \")\n print(resultgdf.calciteTime)\n print(\"RAL TIME: \")\n print(resultgdf.ralTime)\n print(\"EXECUTION TIME: \")\n print(resultgdf.totalTime)\n\n print(\"===================================================\")\n\n logger = logginghelper(name)\n\n print_fixed_log(\n logger,\n queryType,\n queryId,\n sql,\n resultComparisson,\n error_message,\n None,\n None,\n None,\n )\n\n\nclass Test:\n def __init__(self, test_name):\n self.test_name = test_name\n self.total = 0\n self.success = 0\n self.fail_ids = []\n\n\ndef save_log(gpu_ci_mode=False):\n\n c = 1\n 
cadena = []\n subcadena = []\n countPass = 0\n countCrash = 0\n\n for x in HANDLER.log:\n if c < 17:\n subcadena.append(x.msg)\n c = c + 1\n else:\n c = 1\n cadena.append(subcadena)\n subcadena = []\n subcadena.append(x.msg)\n c = c + 1\n print()\n cadena.append(subcadena)\n\n # If it didn't run any test (probably some were skipped)\n # then return success\n if cadena == [[]]:\n return True, []\n\n df = pd.DataFrame(\n cadena,\n columns=[\n \"QueryID\",\n \"TimeStamp\",\n \"TestGroup\",\n \"InputType\",\n \"Query\",\n \"Result\",\n \"Error\",\n \"Branch\",\n \"CommitHash\",\n \"nRals\",\n \"nGPUs\",\n \"DataDirectory\",\n \"TestId\",\n \"LoadingTime\",\n \"EngineTotalTime\",\n \"TotalTime\",\n ],\n )\n\n total = df.shape[0]\n\n countPass = df[df.Result == 1].count()[\"Result\"]\n\n df1 = df[\n [\n \"QueryID\",\n \"TimeStamp\",\n \"TestGroup\",\n \"InputType\",\n \"Query\",\n \"Result\",\n \"Error\",\n \"Branch\",\n \"CommitHash\",\n \"nRals\",\n \"nGPUs\",\n \"DataDirectory\",\n \"LoadingTime\",\n \"EngineTotalTime\",\n \"TotalTime\",\n ]\n ].copy()\n\n create_summary_detail(df, gpu_ci_mode)\n\n printSummary(countPass, countCrash, total, gpu_ci_mode)\n\n if not gpu_ci_mode:\n saveLogInFile(df1)\n\n saveLog = False\n if \"saveLog\" in Settings.data[\"RunSettings\"]:\n saveLog = Settings.data[\"RunSettings\"][\"saveLog\"]\n\n print(\"saveLog = \" + str(saveLog))\n\n # TODO william kharoly felipe we should try to enable and use\n # this function in the future\n # result, error_msgs = verify_prev_google_sheet_results(df1)\n result, error_msgs = True, []\n\n if result is True and saveLog == \"true\":\n saving_google_sheet_results(df1)\n else:\n if countPass < total:\n result, error_msgs = False, []\n else:\n result, error_msgs = True, []\n\n loggingClose(name)\n return result, error_msgs\n\n\ndef create_summary_detail(df, no_color):\n pdf = df\n pdf[\"Result\"] = df[\"Result\"].replace(1, \"Success\")\n pdf[\"Result\"] = df[\"Result\"].replace(0, \"Fail\")\n\n # making 
boolean series for a team name\n filter_fail = pdf[\"Result\"] == \"Fail\"\n\n # filtering data\n pdf2 = pdf.where(filter_fail)\n pdf_fail = pdf2.dropna()\n\n if no_color:\n green = \"\"\n yellow = \"\"\n # red = \"\"\n endc = \"\"\n else:\n green = bcolors.OKGREEN\n yellow = bcolors.WARNING\n # red = bcolors.FAIL\n endc = bcolors.ENDC\n\n # display\n print(green + \"========================================================\")\n print(\"DETAILED SUMMARY TESTS\")\n print(\"========================================================\" + endc)\n pd.set_option(\"max_rows\", 1500)\n print(pdf.groupby([\"TestGroup\", \"InputType\"])[\"Result\"].value_counts())\n print(yellow + \"========================================================\")\n print(\"FAILED TESTS\" + yellow)\n print(\"========================================================\" + endc)\n # pd.set_option('max_columns', 5)\n # pd.set_option('max_colwidth', 1000)\n\n pd.set_option(\"display.max_columns\", None)\n pd.set_option(\"display.width\", 2000)\n pd.set_option(\"display.float_format\", \"{:20,.2f}\".format)\n pd.set_option(\"display.max_colwidth\", None)\n print(\n pdf_fail.groupby([\"TestGroup\", \"InputType\", \"Result\"])[\"TestId\"]\n .apply(\",\".join)\n .reset_index()\n )\n\n\n# This function use the google spreadsheet to compare the current results\n# against historic ones\n# Returns a tuple with 2 entries:\n# 1st element: False in case gpuci should be fail, True otherwise\n# 2nd element: A list of error messages (in case 1st element is False)\n# Example:\n# result, error_msgs = verify_prev_google_sheet_results(log_pdf)\n# if result == False:\n# exits the python process and do not move to next steps\n# TODO william kharoly felipe we should try to enable and use\n# this function in the future\ndef _verify_prev_google_sheet_results(log_pdf):\n import gspread\n from oauth2client.service_account import ServiceAccountCredentials\n\n def get_the_data_from_sheet():\n # Use creds to create a client to interact 
with the Google Drive API\n scope = [\n \"https://www.googleapis.com/auth/drive\",\n \"https://spreadsheets.google.com/feeds\",\n ]\n # Using credentials from BlazingSQL\n # os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']\n # # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest\n # current_dir = \"/home/ubuntu/.conda/envs/e2e\"\n\n log_info = Settings.data[\"RunSettings\"][\"logInfo\"]\n\n if log_info == \"\":\n print(\n \"\"\"####### ======= >>>>>>> WARNING this test run will not\n be compared against old results from Google Docs. Define\n the env var BLAZINGSQL_E2E_LOG_INFO\"\"\"\n )\n return None\n\n log_info = json.loads(log_info)\n creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(\n log_info, scope\n )\n client_blazing = gspread.authorize(creds_blazing)\n # Find a Locally workbook by name and open a sheet\n work_sheet = \"BSQL Log Results\"\n\n if \"worksheet\" in Settings.data[\"RunSettings\"]:\n work_sheet = Settings.data[\"RunSettings\"][\"worksheet\"]\n\n sheet_blazing = client_blazing.open(\"BSQL End-to-End Tests\").worksheet(\n work_sheet\n )\n # Writing log results into Blazing sheet\n ret = pd.DataFrame(sheet_blazing.get_all_records())\n # NOTE percy kharo william we need to patch these columns\n # before convert to parquet\n ret[\"LoadingTime\"] = ret[\"LoadingTime\"].astype(str)\n ret[\"EngineTotalTime\"] = ret[\"EngineTotalTime\"].astype(str)\n ret[\"TotalTime\"] = ret[\"TotalTime\"].astype(str)\n return ret\n\n dir_log = Settings.data[\"TestSettings\"][\"logDirectory\"]\n gspreadCacheHint = Settings.data[\"RunSettings\"][\"gspreadCacheHint\"]\n gspread_e2e_cache_path = dir_log + \"/e2e-gspread-cache.parquet\"\n\n gspread_df = None\n\n if gspreadCacheHint == \"false\":\n gspread_df = get_the_data_from_sheet()\n if gspread_df is not None:\n # Always save a cache (so when gspreadCacheHint\n # is false will refresh the cache)\n gspread_df.to_parquet(gspread_e2e_cache_path)\n elif gspreadCacheHint == \"true\":\n 
if os.path.isfile(gspread_e2e_cache_path):\n gspread_df = pd.read_parquet(gspread_e2e_cache_path)\n else:\n gspread_df = get_the_data_from_sheet()\n if gspread_df is not None:\n gspread_df.to_parquet(gspread_e2e_cache_path)\n\n if gspread_df is None:\n error_msg = \"\"\"ERROR: This test run could not be compared\n against old results from Google Docs\"\"\"\n return False, [error_msg]\n\n log_pdf_copy = log_pdf.copy()\n prev_nrals = gspread_df[\"nRALS\"][0]\n curr_nrals = Settings.data[\"RunSettings\"][\"nRals\"]\n\n # Assume prev_nrals == curr_nrals\n last_e2e_run_id = gspread_df[\"Timestamp\"][0]\n # NOTE If prev_nrals != curr_nrals we need to search the first\n # Timestamp (a.k.a ID) for the current nRals target\n if prev_nrals != curr_nrals:\n gspread_df_uniques = gspread_df.drop_duplicates()\n gspread_df_uniques_target_nrals = gspread_df_uniques.loc[\n gspread_df_uniques[\"nRALS\"] == curr_nrals\n ]\n last_e2e_run_id = gspread_df_uniques_target_nrals.iloc[\n 0, 1\n ] # select the first Timestamp from the unique values\n\n print(\n \"####### ======= >>>>>>> E2E INFO: We will compare the\"\n + \" current run against the ID (Timestamp): \"\n + last_e2e_run_id\n )\n\n last_e2e_run_df = gspread_df.loc[gspread_df[\"Timestamp\"] == last_e2e_run_id]\n\n # NOTE percy kharo william we need to rename some columns to use our dfs\n log_pdf_copy = log_pdf_copy.rename(\n columns={\n \"TestGroup\": \"Test Group\",\n \"InputType\": \"Input Type\",\n \"nRals\": \"nRALS\",\n \"DataDirectory\": \"data_dir\",\n }\n )\n\n # NOTE For debugging\n # log_pdf_copy['TimeStamp'] = log_pdf_copy['TimeStamp'].astype(str)\n # log_pdf_copy.to_parquet('/home/percy/workspace/logtest/ultimo.parquet',\n # compression='GZIP')\n # log_pdf_copy = pd.read_parquet('/home/user/last_run_log_df.parquet')\n\n error_msgs = []\n\n prev_summary = last_e2e_run_df.groupby(\"Test Group\").count()\n curr_summary = log_pdf_copy.groupby(\"Test Group\").count()\n\n prev_test_groups = prev_summary.index.tolist()\n 
curr_test_groups = curr_summary.index.tolist()\n\n has_less_test_groups = len(prev_test_groups) > len(curr_test_groups)\n\n # Check if someone deleted some tests\n # (there more test groups in the sheet)\n if has_less_test_groups:\n list_difference = [\n item for item in prev_test_groups if item not in curr_test_groups\n ]\n error_msg = (\n \"ERROR: current e2e has less test groups than\"\n + \" previous run, delta is %s\" % list_difference\n )\n error_msgs.append(error_msg)\n\n # Just check the common test groups\n if has_less_test_groups:\n test_groups = curr_test_groups\n else:\n test_groups = prev_test_groups\n\n for test_group in test_groups:\n prev_test_group_df = last_e2e_run_df.loc[\n last_e2e_run_df[\"Test Group\"] == test_group\n ]\n prev_input_types = (\n prev_test_group_df.groupby(\"Input Type\").count().index.tolist()\n )\n\n curr_test_group_df = log_pdf_copy.loc[log_pdf_copy[\"Test Group\"] == test_group]\n cur_input_typ = curr_test_group_df.groupby(\"Input Type\").count().index.tolist()\n\n has_less_input_types = len(prev_input_types) > len(cur_input_typ)\n\n if has_less_input_types is True:\n list_difference = [\n item for item in prev_input_types if item not in cur_input_typ\n ]\n error_msg = \"\"\"ERROR: current test group %s has less\n input types cases, delta is %s\"\"\" % (\n test_group,\n list_difference,\n )\n error_msgs.append(error_msg)\n\n for input_type in prev_input_types:\n prev_tests_df = prev_test_group_df.loc[\n prev_test_group_df[\"Input Type\"] == input_type\n ]\n prev_tests_df.sort_values(by=[\"QueryID\"])\n\n curr_tests_df = curr_test_group_df.loc[\n curr_test_group_df[\"Input Type\"] == input_type\n ]\n curr_tests_df.sort_values(by=[\"QueryID\"])\n\n # We need to make a copy since we are going to drop some row\n prev_tests_df = prev_tests_df.copy()\n curr_tests_df = curr_tests_df.copy()\n\n # NOTE for debugging\n # print(\"============================================PREV!\")\n # print(prev_tests_df.head())\n # 
print(len(prev_tests_df))\n # print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxCURR!\")\n # print(curr_tests_df.head())\n # print(len(curr_tests_df))\n\n # Check if current run has less tests than previous run\n len_prev_tests_df = len(prev_tests_df)\n len_curr_tests_df = len(curr_tests_df)\n has_less_tests = len_prev_tests_df > len_curr_tests_df\n\n # NOTE for debugging\n # print(\"====== PREV TESTS ======\")\n # print(prev_tests_df)\n # print(\"====== CURR TESTS ======\")\n # print(curr_tests_df)\n\n if has_less_tests:\n prev_tests = prev_tests_df[\"QueryID\"].tolist()\n curr_tests = curr_tests_df[\"QueryID\"].tolist()\n list_difference = [\n item for item in prev_tests if item not in curr_tests\n ]\n error_msg = \"\"\"ERROR: The test group %s has less tests than\n previous run for input type %s, delta is %s\"\"\" % (\n test_group,\n input_type,\n list_difference,\n )\n error_msgs.append(error_msg)\n\n n = len_prev_tests_df - len_curr_tests_df\n prev_tests_df.drop(prev_tests_df.tail(n).index, inplace=True)\n elif len_prev_tests_df < len_curr_tests_df:\n n = len_curr_tests_df - len_prev_tests_df\n curr_tests_df.drop(curr_tests_df.tail(n).index, inplace=True)\n\n prev_tests_results = prev_tests_df[\"Result\"].to_list()\n curr_tests_results = curr_tests_df[\"Result\"].to_list()\n\n for i in range(0, len(prev_tests_results)):\n prev_test_result = prev_tests_results[i]\n curr_test_result = curr_tests_results[i]\n\n if prev_test_result == 1 and curr_test_result == 0:\n error_msg = \"\"\"ERROR: Test %d for %s (%s) is now failing\n but before was ok!\"\"\" % (\n i + 1,\n test_group,\n input_type,\n )\n error_msgs.append(error_msg)\n\n succs = len(error_msgs) == 0\n return succs, error_msgs\n\n\ndef saving_google_sheet_results(log_pdf):\n import gspread\n from oauth2client.service_account import ServiceAccountCredentials\n\n log_info = Settings.data[\"RunSettings\"][\"logInfo\"]\n\n if log_info == \"\":\n print(\n \"\"\"####### ======= >>>>>>> WARNING this test run 
will\n not save its results into the Google spreadsheet.\"\"\"\n )\n return\n\n # Create an empty list\n log_list = []\n\n # Iterate over each row\n for index, rows in log_pdf.iterrows():\n # Create a list for the current row (ADDS)\n current_list = [\n rows.QueryID,\n str(rows.TimeStamp),\n str(rows.TestGroup),\n rows.InputType,\n rows.Query,\n rows.Result,\n rows.Error,\n rows.Branch,\n str(rows.CommitHash),\n rows.nRals,\n rows.nGPUs,\n rows.DataDirectory,\n rows.LoadingTime,\n rows.EngineTotalTime,\n rows.TotalTime,\n ]\n\n # append the list to the final list\n log_list.append(current_list)\n # Use creds to create a client to interact with the Google Drive API\n scope = [\n \"https://www.googleapis.com/auth/drive\",\n \"https://spreadsheets.google.com/feeds\",\n ]\n # === 1. BlazingSQL =====\n # Using credentials from BlazingSQL\n # os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']\n # # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest\n current_dir = \"/home/ubuntu/.conda/envs/e2e\"\n print(current_dir)\n\n log_info = json.loads(log_info)\n creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(log_info, scope)\n client_blazing = gspread.authorize(creds_blazing)\n # Find a Locally workbook by name and open a sheet\n work_sheet = \"BSQL Log Results\"\n if \"worksheet\" in Settings.data[\"RunSettings\"]:\n work_sheet = Settings.data[\"RunSettings\"][\"worksheet\"]\n blaz_googlesheat = client_blazing.open(\"BSQL End-to-End Tests\")\n sheet_blazing = blaz_googlesheat.worksheet(work_sheet)\n # Writing log results into Blazing sheet\n total_queries = len(log_list)\n for i in range(0, total_queries):\n sheet_blazing.append_row(log_list[i])\n time.sleep(1)\n\n print(\"\\nTable was uptdated into Blazing Google SpreadSheet\")\n\n\ndef saveLogInFile(df):\n dir_log = Settings.data[\"TestSettings\"][\"logDirectory\"]\n filepath = getFileName(dir_log)\n df.to_excel(filepath, index=False)\n\ndef validate_messages(error_message, 
message_validation):\n error_message = error_message.replace('\\n', ' ').replace('\\r', ' ')\n message_validation = message_validation.replace('\\n', ' ').replace('\\r', ' ')\n error_message = error_message.replace(' ', '')\n message_validation = message_validation.replace(' ', '')\n\n if error_message == message_validation:\n result = \"Success\"\n else:\n result = \"Fail\"\n\n return result\n\nclass bcolors:\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n\n\ndef on_jenkins():\n # NOTE For more env vars see\n # https://wiki.jenkins.io/display/JENKINS/Building+a+software+project\n jenkins_job = os.environ.get(\"JOB_NAME\")\n if jenkins_job is not None:\n return True\n\n return False\n\n\ndef print_tests(tests, onlyFails=False):\n print(\n \"\"\"************************************************************\n *******************\"\"\"\n )\n\n tab = \" \"\n\n failedPrefix = \"\"\n if onlyFails:\n failedPrefix = \"FAILED\"\n\n # TODO percy check None\n for extension in tests:\n if onlyFails:\n if extension == \"parquet\":\n print(\n \"!!!!!!!!!!!!!!!! \"\n + failedPrefix\n + \" \"\n + extension\n + \" TESTS !!!!!!!!!!!!\"\n )\n else:\n print(\n \"!!!!!!!!!!!!!!!! 
\"\n + failedPrefix\n + \" \"\n + extension\n + \" TESTS !!!!!!!!!!!!!!!!\"\n )\n else:\n if extension == \"parquet\":\n print(\"################ \" + extension + \" TESTS ############\")\n else:\n print(\"############## \" + extension + \" TESTS ##############\")\n\n testNames = tests.get(extension)\n for testName in testNames:\n test = testNames.get(testName)\n\n total = test.get(\"total\")\n countPass = test.get(\"countPass\")\n countCrash = test.get(\"countCrash\")\n failIds = test.get(\"failIds\")\n\n showTest = False\n\n if onlyFails:\n if len(failIds) > 0:\n showTest = True\n print(tab + \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n else:\n showTest = True\n print(tab + \"++++++++++++++++++++++++++++++++\")\n\n if showTest:\n green = bcolors.OKGREEN\n yellow = bcolors.WARNING\n red = bcolors.FAIL\n endc = bcolors.ENDC\n\n # don't use colors since jenkins doesn't support ansi chars\n if on_jenkins():\n green = \"\"\n yellow = \"\"\n red = \"\"\n endc = \"\"\n\n print(\n tab\n + \"SUMMARY for \"\n + failedPrefix\n + \" test suite: \"\n + testName\n + \" - \"\n + extension\n )\n\n if not onlyFails:\n pass_green = green\n pass_endc = endc\n if (\n countPass != total\n ): # if no full pass then don't use green colors here\n pass_green = \"\"\n pass_endc = \"\"\n\n print(\n pass_green\n + tab\n + \"PASSED: \"\n + str(countPass)\n + \"/\"\n + str(total)\n + pass_endc\n )\n\n fails = total - countPass - countCrash\n yellow_fail = yellow\n yellow_endc = endc\n if fails == 0:\n yellow_fail = \"\"\n yellow_endc = \"\"\n\n print(\n yellow_fail\n + tab\n + \"FAILED: \"\n + str(fails)\n + \"/\"\n + str(total)\n + \" \"\n + str(failIds)\n + yellow_endc\n )\n\n red_crash = red\n red_endc = endc\n\n # if no crashes then don't use red colors here\n if countCrash == 0:\n red_crash = \"\"\n red_endc = \"\"\n\n print(\n red_crash\n + tab\n + \"CRASH: \"\n + str(countCrash)\n + \"/\"\n + str(total)\n + red_endc\n )\n\n if not onlyFails:\n print(tab + \"TOTAL: \" + str(total))\n\n\ndef 
printSummary(countPass, countCrash, total, no_color):\n\n if no_color:\n green = \"\"\n yellow = \"\"\n red = \"\"\n endc = \"\"\n else:\n green = bcolors.OKGREEN\n yellow = bcolors.WARNING\n red = bcolors.FAIL\n endc = bcolors.ENDC\n\n # Second: print the global summary (totals from all the tests)\n fails = total - countPass - countCrash\n print(\n \"\"\"**********************************************************\n *********************\"\"\"\n )\n print(\"TOTAL SUMMARY for test suite: \")\n print(green + \"PASSED: \" + str(countPass) + \"/\" + str(total) + endc)\n print(yellow + \"FAILED: \" + str(fails) + \"/\" + str(total) + endc)\n print(red + \"CRASH: \" + str(countCrash) + \"/\" + str(total) + endc)\n print(\"TOTAL: \" + str(total))\n\n\ndef getFileName(dir_log):\n\n fecha = time.strftime(\"%H%M%S\")\n hora = time.strftime(\"%I%M%S\")\n return dir_log + \"LogTest\" + fecha + hora + \".xlsx\" #\n\n\n# ===========================================================================\n\ntableNames = [\n \"customer\",\n \"orders\",\n \"supplier\",\n \"lineitem\",\n \"part\",\n \"partsupp\",\n \"nation\",\n \"region\",\n \"perf\",\n \"acq\",\n \"names\",\n \"bool_orders\",\n \"web_site\",\n \"web_sales\",\n \"web_returns\",\n \"web_page\",\n \"web_clickstreams\",\n \"warehouse\",\n \"time_dim\",\n \"store_sales\",\n \"store_returns\",\n \"store\",\n \"ship_mode\",\n \"reason\",\n \"promotion\",\n \"product_reviews\",\n \"item_marketprices\",\n \"item\",\n \"inventory\",\n \"income_band\",\n \"household_demographics\",\n \"date_dim\",\n \"customer_demographics\",\n \"customer_address\",\n \"customer\",\n \"split\",\n \"docked\",\n \"smiles\",\n \"dcoids\",\n]\n\n\ndef get_table_occurrences(query):\n res = []\n for name in tableNames:\n if query.find(name) != -1:\n res.append(name)\n return res\n\n\ndef replace_all(text, dic):\n for i, j in dic.items():\n text = re.sub(r\"\\s%s(\\s|$|\\,)\" % i, j, text)\n return text\n\n\ndef get_blazingsql_query(db_name, query):\n 
new_query = query\n for table_name in get_table_occurrences(query):\n new_query = replace_all(\n new_query,\n {table_name: \" %(table)s \" % {\"table\": db_name + \".\" + table_name}},\n )\n return new_query\n\n\ndef get_drill_query(query):\n new_query = query\n for table_name in get_table_occurrences(query):\n new_query = replace_all(\n new_query, {table_name: \" dfs.tmp.`%(table)s` \" % {\"table\": table_name}}\n )\n return new_query\n\n\n# ================================================================================================================\n\n\ndef run_query_drill(drill, query_str):\n timeout = 400\n query_result = drill.query(query_str, timeout)\n df = query_result.to_dataframe()\n if df.size == 0:\n return Result(query_result.columns, df, None)\n df = df[query_result.columns]\n result = Result(query_result.columns, df, None)\n return result\n\n\ndef run_query_spark(spark, query_str):\n query_result = spark.sql(query_str)\n df = query_result.toPandas()\n if df.size == 0:\n return Result(query_result.columns, df, None)\n df = df[query_result.columns]\n result = Result(query_result.columns, df, None)\n return result\n\n\ndef save_results_arrow(filename, pdf2):\n # save results\n import pyarrow as pa\n\n table = pa.Table.from_pandas(pdf2)\n # schema = pa.Schema.from_pandas(pdf2)\n with open(filename, \"bw\") as f:\n writer = pa.RecordBatchFileWriter(f, table.schema)\n writer.write(table)\n writer.close()\n\n\ndef save_results_parquet(filename, pdf2):\n pdf2.to_parquet(filename, compression=\"GZIP\")\n\n\ndef run_query(\n bc,\n engine,\n query,\n queryId,\n queryType,\n worder,\n orderBy,\n acceptable_difference,\n use_percentage,\n input_type,\n **kwargs\n):\n print(query)\n\n query_spark = kwargs.get(\"query_spark\", query)\n\n algebra = kwargs.get(\"algebra\", \"\")\n\n nRals = Settings.data[\"RunSettings\"][\"nRals\"]\n\n print_result = kwargs.get(\"print_result\")\n if print_result is None:\n print_result = False\n\n message_validation = 
kwargs.get(\"message_validation\", \"\")\n if message_validation is None:\n message_validation = False\n\n data_type = cs.get_extension(input_type)\n\n if Settings.execution_mode != \"Generator\":\n print(\n \"\\n=============== New query: \"\n + str(queryId)\n + \" - \"\n + data_type\n + \" =================\"\n )\n\n load_time = 0\n engine_time = 0\n total_time = 0\n\n nested_query = kwargs.get(\"nested_query\", False)\n \n error_message = \"\"\n\n if not nested_query:\n # if int(nRals) == 1: # Single Node\n query_blz = query # get_blazingsql_query('main', query)\n if algebra == \"\":\n start_time = time.time()\n try:\n result_gdf = bc.sql(query_blz)\n except Exception as e:\n error_message=str(e)\n\n if not message_validation:\n end_time = time.time()\n total_time = (end_time - start_time) * 1000\n # SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN\n # duration ELSE 0 END) AS load_time,\n # MAX(load_time) AS load_time,\n # log_result = bc.log(\n # \"\"\"SELECT\n # MAX(end_time) as end_time, query_id,\n # MAX(total_time) AS total_time\n # FROM (\n # SELECT\n # query_id, node_id,\n # SUM(CASE WHEN info = 'Query Execution Done' THEN\n # duration ELSE 0 END) AS total_time,\n # MAX(log_time) AS end_time\n # FROM\n # bsql_logs\n # WHERE\n # info = 'evaluate_split_query load_data'\n # OR info = 'Query Execution Done'\n # GROUP BY\n # node_id, query_id\n # )\n # GROUP BY\n # query_id\n # ORDER BY\n # end_time DESC limit 1\"\"\"\n # )\n\n # if int(nRals) == 1: # Single Node\n # n_log = log_result\n # else: # Simple Distribution\n # n_log = log_result.compute()\n\n load_time = 0 # n_log['load_time'][0]\n engine_time = 0 #n_log[\"total_time\"][0]\n else:\n result_gdf = bc.sql(query_blz, algebra=algebra)\n\n else: # for nested queries as column basis test\n result_gdf = kwargs.get(\"blz_result\", [])\n\n str_code_test = str(get_codTest(queryType)).upper()\n filename = str_code_test + \"-\" + str(queryId) + \".parquet\"\n\n result_dir = 
Settings.data[\"TestSettings\"][\"fileResultsDirectory\"]\n file_results_dir = str(result_dir)\n\n\n if not message_validation== \"\":\n print_query_results2(\n query,\n queryId,\n input_type,\n queryType,\n error_message,\n message_validation\n )\n elif not isinstance(engine, str):\n if isinstance(engine, PyDrill):\n # Drill\n query_drill = get_drill_query(query)\n result_drill_gd = run_query_drill(engine, query_drill)\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n .to_pandas()\n )\n pdf2 = to_pandas_f64_engine(\n result_drill_gd.resultSet, expected_dtypes\n )\n pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))\n formatResults(pdf1, pdf2, worder, orderBy)\n\n if Settings.execution_mode == ExecutionMode.GENERATOR:\n file_res_drill_dir = (\n file_results_dir + \"/\" + \"drill\" + \"/\" + filename\n )\n\n if not os.path.exists(file_res_drill_dir):\n save_results_parquet(file_res_drill_dir, pdf2)\n\n print(\"Drill: \" + filename + \" generated.\")\n\n else:\n print_query_results(\n query,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n\n else:\n print_query_results2(\n query, queryId, queryType, result_gdf.error_message\n )\n elif isinstance(engine, SparkSession):\n # Spark\n result_spark_df = run_query_spark(engine, query_spark)\n\n if result_gdf is not None:\n if result_gdf.columns is not None:\n\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n 
.to_pandas()\n )\n pdf2 = to_pandas_f64_engine(\n result_spark_df.resultSet, expected_dtypes\n )\n pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))\n formatResults(pdf1, pdf2, worder, orderBy)\n\n if Settings.execution_mode == ExecutionMode.GENERATOR:\n\n file_res_drill_dir = (\n file_results_dir + \"/\" + \"spark\" + \"/\" + filename\n )\n\n if not os.path.exists(file_res_drill_dir):\n save_results_parquet(file_res_drill_dir, pdf2)\n print(\"Spark: \" + filename + \" generated.\")\n\n else:\n print_query_results(\n query_spark,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n else:\n print_query_results2(\n query_spark, queryId, queryType, result_gdf.error_message\n )\n else: # GPUCI\n\n compareResults = True\n if \"compare_results\" in Settings.data[\"RunSettings\"]:\n compareResults = Settings.data[\"RunSettings\"][\"compare_results\"]\n\n if compareResults == \"true\":\n resultFile = file_results_dir + \"/\" + str(engine) + \"/\" + filename\n pdf2 = get_results(resultFile)\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n .to_pandas()\n )\n format_pdf(pdf1, worder, orderBy)\n print(pdf2)\n\n print_query_results(\n query,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n\n else:\n print_query_results2(\n query, queryId, queryType, result_gdf.error_message\n )\n else:\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is 
dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n expected_dtypes = result_gdf.dtypes.to_list()\n pdf1 = (\n upcast_to_float(result_gdf)\n .fillna(get_null_constants(result_gdf))\n .to_pandas()\n )\n pdf2 = pd.DataFrame()\n formatResults(pdf1, pdf2, worder, orderBy)\n\n print_query_results(\n query,\n queryId,\n queryType,\n pdf1,\n pdf2,\n result_gdf,\n acceptable_difference,\n use_percentage,\n print_result,\n engine,\n input_type,\n load_time,\n engine_time,\n total_time,\n )\n else:\n print_query_results2(\n query, queryId, queryType, result_gdf.error_message\n )\n\ndef run_query_log(\n bc,\n query,\n queryId,\n queryType,\n **kwargs\n):\n result_gdf = None\n error_message = \"\"\n message_validation = \"\"\n\n try:\n result_gdf = bc.log(query)\n except Exception as e:\n error_message=str(e)\n\n if result_gdf is not None:\n if result_gdf.columns is not None:\n # FOR DASK CUDF\n import dask_cudf\n\n if type(result_gdf) is dask_cudf.core.DataFrame:\n result_gdf = result_gdf.compute()\n\n print_query_results2(\n query, queryId, DataType.CUDF, queryType, error_message, message_validation\n )\n else:\n print_query_results2(\n query, queryId, DataType.CUDF, queryType, error_message, message_validation\n )\n\ndef run_query_performance(\n bc,\n drill,\n query,\n queryId,\n queryType,\n worder,\n orderBy,\n acceptable_difference,\n use_percentage,\n **kwargs\n):\n # Blazing\n query_blz = query # get_blazingsql_query('main', query)\n result_gdf = bc.sql(query_blz).get()\n if result_gdf.error_message == \"\":\n print_query_results_performance(query, queryId, queryType, result_gdf)\n else:\n print_query_results2(query, queryId, queryType, result_gdf.error_message)\n\n\ndef formatResults(pdf1, pdf2, worder, orderBy):\n if worder == 1 and pdf1.size != 0 and pdf2.size != 0:\n if len(pdf1.columns) == len(pdf2.columns):\n pdf1.sort_values(\n [orderBy] if orderBy else pdf1.columns.to_list(), inplace=True\n )\n pdf2.sort_values(\n [orderBy] if orderBy else 
pdf2.columns.to_list(), inplace=True\n )\n\n\ndef format_pdf(pdf, worder, orderBy):\n if worder == 1 and pdf.size != 0:\n pdf.sort_values([orderBy] if orderBy else pdf.columns.to_list(), inplace=True)\n\n\ndef get_results(result_file):\n df = pd.read_parquet(result_file)\n\n return df\n"
] | [
[
"numpy.allclose",
"numpy.issubdtype",
"pandas.DataFrame",
"numpy.dtype",
"numpy.datetime64",
"pandas.read_parquet",
"numpy.all",
"numpy.warnings.filterwarnings",
"pandas.set_option",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
andyj10224/psi4numpy | [
"cbef6ddcb32ccfbf773befea6dc4aaae2b428776",
"cbef6ddcb32ccfbf773befea6dc4aaae2b428776"
] | [
"Moller-Plesset/MP3.py",
"Self-Consistent-Field/SOUHF_iterative.py"
] | [
"\"\"\"\nReference implementation for the correlation energy of MP3 with an RHF reference.\n\nReferences:\n- Equations from [Szabo:1996]\n\"\"\"\n\n__authors__ = \"Daniel G. A. Smith\"\n__credits__ = [\"Daniel G. A. Smith\", \"Dominic A. Sirianni\"]\n\n__copyright__ = \"(c) 2014-2018, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-05-23\"\n\nimport time\nimport numpy as np\nnp.set_printoptions(precision=5, linewidth=200, suppress=True)\nimport psi4\n\n# Memory for Psi4 in GB\npsi4.set_memory('2 GB')\npsi4.core.set_output_file('output.dat', False)\n\n# Memory for numpy in GB\nnumpy_memory = 2\n\nmol = psi4.geometry(\"\"\"\nO\nH 1 1.1\nH 1 1.1 2 104\nsymmetry c1\n\"\"\")\n\npsi4.set_options({'basis': 'aug-cc-pvdz',\n 'scf_type': 'pk',\n 'guess': 'core',\n 'mp2_type': 'conv',\n 'mp_type': 'conv',\n 'freeze_core': 'false',\n 'e_convergence': 1e-8,\n 'd_convergence': 1e-8})\n\n# First compute RHF energy using Psi4\nscf_e, wfn = psi4.energy('SCF', return_wfn=True)\n\n# Coefficient Matrix\nC = np.array(wfn.Ca())\n# Double occupied orbitals\nndocc = wfn.doccpi()[0]\n# Number of molecular orbitals\nnmo = wfn.nmo()\n# SCF energy\nSCF_E = wfn.energy()\n# Orbital energies\neps = wfn.epsilon_a()\neps = np.array([eps.get(x) for x in range(C.shape[0])])\n\n# Compute size of ERI tensor in GB\nERI_Size = (nmo**4)*8.0 / 1E9\nprint(\"Size of the ERI tensor will be %4.2f GB.\" % ERI_Size)\nmemory_footprint = ERI_Size*2.5\nif memory_footprint > numpy_memory:\n clean()\n raise Exception(\"Estimated memory utilization (%4.2f GB) exceeds numpy_memory limit of %4.2f GB.\" % (memory_footprint, numpy_memory))\n\n# Integral generation from Psi4's MintsHelper\nt = time.time()\nmints = psi4.core.MintsHelper(wfn.basisset())\nI = np.array(mints.ao_eri())\nI = I.reshape(nmo, nmo, nmo, nmo)\n\nprint('\\nTotal time taken for ERI integrals: %.3f seconds.' 
% (time.time()-t))\n\nt=time.time()\n\n# Complete the AOpqrs -> MOiajb step\nMO = np.einsum('rJ,pqrs->pqJs', C, I)\nMO = np.einsum('pI,pqJs->IqJs', C, MO)\nMO = np.einsum('sB,IqJs->IqJB', C, MO)\nMO = np.einsum('qA,IqJB->IAJB', C, MO)\n\n# (pq|rs) -> <ps|rq>\nMO = MO.swapaxes(1, 2)\n\nprint('\\nTotal time taken for integral transformation: %.f seconds' % (time.time()-t))\nprint('Shape of MO integrals %s \\n' % str(MO.shape))\n\n# Build epsilon tensor\neocc = eps[:ndocc]\nevirt = eps[ndocc:]\nepsilon = 1/(eocc.reshape(-1, 1, 1, 1) + eocc.reshape(-1, 1, 1) - evirt.reshape(-1, 1) - evirt)\n\n# Build o and v slices\no = slice(0, ndocc)\nv = slice(ndocc, MO.shape[0])\n\n### MP2 correlation energy\n\nMP2corr_E = 2 * np.einsum('abrs,rsab,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)\nMP2corr_E -= np.einsum('abrs,rsba,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)\nMP2total_E = SCF_E + MP2corr_E\nprint('MP2 correlation energy: %16.8f' % MP2corr_E)\nprint('MP2 total energy: %16.8f' % MP2total_E)\npsi4.compare_values(psi4.energy('MP2'), MP2total_E, 6, 'MP2 Energy')\n\nprint('\\n Starting MP3 energy...')\nt = time.time()\n\n# MP3 Correlation energy\n\n# Prefactors taken from terms in unnumbered expression for spatial-orbital MP3\n# energy on [Szabo:1996] pp. (bottom) 367 - (top) 368. Individual equations taken\n# from [Szabo:1996] Tbl. 6.2 pp. 
364-365\n\n# Equation 1: 3rd order diagram 1\nMP3corr_E = 2.0 * np.einsum('abru,ruts,tsab,abru,abts', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon) \n# Equation 2: 3rd order diagram 2 \nMP3corr_E += 2.0 * np.einsum('adrs,cbad,rscb,adrs,cbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 3: 3rd order diagram 3\nMP3corr_E += -4.0 * np.einsum('acrt,rbsc,stab,acrt,abst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 4: 3rd order diagram 4\nMP3corr_E += -4.0 * np.einsum('bcrt,rasb,stac,bcrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 5: 3rd order diagram 5\nMP3corr_E += 8.0 * np.einsum('acrt,btsc,rsab,acrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 6: 3rd order diagram 6\nMP3corr_E += 2.0 * np.einsum('cbrt,atsc,rsab,cbrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 7: 3rd order diagram 7\nMP3corr_E += -1.0 * np.einsum('acrs,dbac,srdb,acrs,dbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 8: 3rd order diagram 8\nMP3corr_E += -1.0 * np.einsum('abrt,trus,usab,abtr,abus', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)\n# Equation 9: 3rd order diagram 9\nMP3corr_E += 2.0 * np.einsum('bcrt,arbs,tsac,cbrt,acst', MO[o, o, v, v], MO[o, v, o, v], MO[v, v, o, o], epsilon, epsilon)\n# Equation 10: 3rd order diagram 10\nMP3corr_E += 2.0 * np.einsum('cbrt,rasb,stac,cbrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)\n# Equation 11: 3rd order diagram 11\nMP3corr_E += -4.0 * np.einsum('abrs,scat,rtbc,abrs,cbrt', MO[o, o, v, v], MO[v, o, o, v], MO[v, v, o, o], epsilon, epsilon)\n# Equation 12: 3rd order diagram 12\nMP3corr_E += -4.0 * np.einsum('bcrt,atsc,rsab,bctr,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)\n\nprint('...took %.3f seconds to compute MP3 correlation 
energy.\\n' % (time.time()-t))\n\nprint('Third order energy: %16.8f' % MP3corr_E)\nMP3corr_E += MP2corr_E\nMP3total_E = SCF_E + MP3corr_E\nprint('MP3 correlation energy: %16.8f' % MP3corr_E)\nprint('MP3 total energy: %16.8f' % MP3total_E)\npsi4.compare_values(psi4.energy('MP3'), MP3total_E, 6, 'MP3 Energy')\n\n\n",
"\"\"\"\nUnrestricted Hartree--Fock script using iterative second-order\nconvergence acceleration via preconditioned conjugate gradients (PCG).\n\nReferences:\n- UHF equations & algorithms from [Szabo:1996]\n- SO equations from [Helgaker:2000]\n- PCG equations & algorithm from [Shewchuk:1994]\n\"\"\"\n\n__authors__ = \"Daniel G. A. Smith\"\n__credits__ = [\"Daniel G. A. Smith\"]\n\n__copyright__ = \"(c) 2014-2018, The Psi4NumPy Developers\"\n__license__ = \"BSD-3-Clause\"\n__date__ = \"2017-9-30\"\n\nimport time\nimport numpy as np\nimport helper_HF as scf_helper\nnp.set_printoptions(precision=5, linewidth=200, suppress=True)\nimport psi4\n\n# Memory for Psi4 in GB\npsi4.set_memory('2 GB')\npsi4.core.set_output_file('output.dat', False)\n\n# Memory for numpy in GB\nnumpy_memory = 2\n\n# Triplet O2, actually very multireference\nmol = psi4.geometry(\"\"\"\n 0 3\n O\n O 1 1.2\nsymmetry c1\n\"\"\")\n\npsi4.set_options({'basis': 'aug-cc-pvdz',\n 'scf_type': 'df',\n 'e_convergence': 1e-8,\n 'reference': 'uhf'})\n\n# Set defaults\nmaxiter = 10\nE_conv = 1.0E-8\nD_conv = 1.0E-5\nmax_micro = 4\nmicro_conv = 5.e-2\nmicro_print = True\n\n# Integral generation from Psi4's MintsHelper\nt = time.time()\nwfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))\nmints = psi4.core.MintsHelper(wfn.basisset())\nS = np.asarray(mints.ao_overlap())\n\n# Occupations\nnbf = wfn.nso()\nnalpha = wfn.nalpha()\nnbeta = wfn.nbeta()\n\nprint('\\nNumber of doubly occupied orbitals: %d' % nalpha)\nprint('\\nNumber of singly occupied orbitals: %d' % (nalpha - nbeta))\nprint('Number of basis functions: %d' % nbf)\n\nV = np.asarray(mints.ao_potential())\nT = np.asarray(mints.ao_kinetic())\n\nprint('\\nTotal time taken for integrals: %.3f seconds.' 
% (time.time()-t))\n\nt = time.time()\n\n# Build H_core\nH = T + V\n\n# Orthogonalizer A = S^(-1/2)\nA = mints.ao_overlap()\nA.power(-0.5, 1.e-16)\nA = np.asarray(A)\n\n\ndef diag_H(H, nocc):\n Hp = A.dot(H).dot(A)\n e, C2 = np.linalg.eigh(Hp)\n C = A.dot(C2)\n Cocc = C[:, :nocc]\n D = np.einsum('pi,qi->pq', Cocc, Cocc)\n return (C, D)\n\n\ndef SCF_Hx(xa, xb, moFa, Co_a, Cv_a, moFb, Co_b, Cv_b):\n \"\"\"\n Compute the \"matrix-vector\" product between electronic Hessian (rank-4) and\n matrix of nonredundant orbital rotations (rank-2).\n\n Parameters\n ----------\n x : numpy.array\n Matrix of nonredundant rotations.\n moF : numpy.array\n MO-basis Fock matrix\n Co : numpy.array\n Matrix of occupied orbital coefficients.\n Cv : numpy.array\n Matrix of virtual orbital coefficients.\n\n Returns\n -------\n F : numpy.array\n Hessian product tensor\n \"\"\"\n Hx_a = np.dot(moFa[:nbeta, :nbeta], xa)\n Hx_a -= np.dot(xa, moFa[nbeta:, nbeta:])\n\n Hx_b = np.dot(moFb[:nalpha, :nalpha], xb)\n Hx_b -= np.dot(xb, moFb[nalpha:, nalpha:])\n\n # Build two electron part, M = -4 (4 G_{mnip} - g_{mpin} - g_{npim}) K_{ip}\n # From [Helgaker:2000] Eqn. 
10.8.65\n C_right_a = np.einsum('ia,sa->si', -xa, Cv_a)\n C_right_b = np.einsum('ia,sa->si', -xb, Cv_b)\n\n J, K = scf_helper.compute_jk(jk, [Co_a, Co_b], [C_right_a, C_right_b])\n\n Jab = J[0] + J[1]\n Hx_a += (Co_a.T).dot(2 * Jab - K[0].T - K[0]).dot(Cv_a)\n Hx_b += (Co_b.T).dot(2 * Jab - K[1].T - K[1]).dot(Cv_b)\n\n Hx_a *= -4\n Hx_b *= -4\n\n return (Hx_a, Hx_b)\n\nCa, Da = diag_H(H, nbeta)\nCb, Db = diag_H(H, nalpha)\n\nt = time.time()\nE = 0.0\nEnuc = mol.nuclear_repulsion_energy()\nEold = 0.0\n\n# Initialize the JK object\njk = psi4.core.JK.build(wfn.basisset())\njk.initialize()\n\n# Build a DIIS helper object\ndiisa = scf_helper.DIIS_helper()\ndiisb = scf_helper.DIIS_helper()\n\nprint('\\nTotal time taken for setup: %.3f seconds' % (time.time() - t))\n\nprint('\\nStart SCF iterations:\\n')\nt = time.time()\n\nfor SCF_ITER in range(1, maxiter + 1):\n\n # Build Fock matrices\n J, K = scf_helper.compute_jk(jk, [Ca[:, :nbeta], Cb[:, :nalpha]])\n J = J[0] + J[1]\n Fa = H + J - K[0]\n Fb = H + J - K[1]\n\n # DIIS error build and update\n diisa_e = Fa.dot(Da).dot(S) - S.dot(Da).dot(Fa)\n diisa_e = (A.T).dot(diisa_e).dot(A)\n diisa.add(Fa, diisa_e)\n\n diisb_e = Fb.dot(Db).dot(S) - S.dot(Db).dot(Fb)\n diisb_e = (A.T).dot(diisb_e).dot(A)\n diisb.add(Fb, diisb_e)\n\n # SCF energy and update\n SCF_E = np.einsum('pq,pq->', Da + Db, H)\n SCF_E += np.einsum('pq,pq->', Da, Fa)\n SCF_E += np.einsum('pq,pq->', Db, Fb)\n SCF_E *= 0.5\n SCF_E += Enuc\n\n dRMS = 0.5 * (np.mean(diisa_e**2)**0.5 + np.mean(diisb_e**2)**0.5)\n print('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E dRMS = %1.5E'\n % (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS))\n if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):\n break\n\n Eold = SCF_E\n\n Co_a = Ca[:, :nbeta]\n Cv_a = Ca[:, nbeta:]\n moF_a = np.dot(Ca.T, Fa).dot(Ca)\n gradient_a = -4 * moF_a[:nbeta, nbeta:]\n gradient_norm_a = np.linalg.norm(gradient_a)\n\n Co_b = Cb[:, :nalpha]\n Cv_b = Cb[:, nalpha:]\n moF_b = np.dot(Cb.T, Fb).dot(Cb)\n gradient_b 
= -4 * moF_b[:nalpha, nalpha:]\n gradient_norm_b = np.linalg.norm(gradient_b)\n\n gradient_norm = gradient_norm_a + gradient_norm_b\n\n # Conventional updates\n if np.any(np.abs(gradient_a) > 0.3) or np.any(np.abs(gradient_b) > 0.3):\n Fa = diisa.extrapolate()\n Fb = diisb.extrapolate()\n\n # Diagonalize Fock matrix\n Ca, Da = diag_H(Fa, nbeta)\n Cb, Db = diag_H(Fb, nalpha)\n\n else:\n so_diis = scf_helper.DIIS_helper()\n\n # Initial guess & Jacobi preconditioner for alpha & beta\n eps_a = np.diag(moF_a)\n precon_a = -4 * (eps_a[:nbeta].reshape(-1, 1) - eps_a[nbeta:])\n x_a = gradient_a / precon_a\n\n eps_b = np.diag(moF_b)\n precon_b = -4 * (eps_b[:nalpha].reshape(-1, 1) - eps_b[nalpha:])\n x_b = gradient_b / precon_b\n\n Hx_a, Hx_b = SCF_Hx(x_a, x_b, moF_a, Co_a, Cv_a, moF_b, Co_b, Cv_b)\n\n r_a = gradient_a - Hx_a\n z_a = r_a / precon_a\n p_a = z_a.copy()\n\n r_b = gradient_b - Hx_b\n z_b = r_b / precon_b\n p_b = z_b.copy()\n\n # PCG Iterations for alpha & beta\n for rot_iter in range(max_micro):\n rz_old = np.vdot(r_a, z_a) + np.vdot(r_b, z_b)\n\n Hx_a, Hx_b = SCF_Hx(p_a, p_b, moF_a, Co_a, Cv_a, moF_b, Co_b, Cv_b)\n\n alpha = rz_old / (np.vdot(Hx_a, p_a) + np.vdot(Hx_b, p_b))\n\n # CG update\n x_a += alpha * p_a\n r_a -= alpha * Hx_a\n z_a = r_a / precon_a\n\n x_b += alpha * p_b\n r_b -= alpha * Hx_b\n z_b = r_b / precon_b\n\n x_diis = np.hstack((x_a.ravel(), x_b.ravel()))\n r_diis = np.hstack((r_a.ravel(), r_b.ravel()))\n so_diis.add(x_diis, r_diis)\n\n rms_a = (np.linalg.norm(r_a) / gradient_norm_a) ** 0.5\n rms_b = (np.linalg.norm(r_b) / gradient_norm_b) ** 0.5\n\n if gradient_norm > 1.e-2:\n denom = gradient_norm\n else:\n denom = 1.e-2\n rms = ((np.linalg.norm(r_a) + np.linalg.norm(r_b)) / denom) ** 0.5\n\n if micro_print:\n print('Micro Iteration %2d: Rel. 
RMS = %1.5e (a: %1.2e, b: %1.2e)' % (rot_iter + 1, rms, rms_a, rms_b))\n if rms < micro_conv:\n break\n\n beta = (np.vdot(r_a, z_a) + np.vdot(r_b, z_b)) / rz_old\n\n p_a = z_a + beta * p_a\n p_b = z_b + beta * p_b\n\n x = so_diis.extrapolate()\n x_a = x[:x_a.size].reshape(x_a.shape)\n x_b = x[x_a.size:].reshape(x_b.shape)\n\n # Diagonalize Fock matrix\n Ca, Da = scf_helper.rotate_orbitals(Ca, x_a, True)\n Cb, Db = scf_helper.rotate_orbitals(Cb, x_b, True)\n\n if SCF_ITER == maxiter:\n psi4.core.clean()\n raise Exception(\"Maximum number of SCF cycles exceeded.\")\n\nprint('Total time for SCF iterations: %.3f seconds \\n' % (time.time() - t))\n\nspin_mat = (Cb[:, :nalpha].T).dot(S).dot(Ca[:, :nbeta])\nspin_contam = min(nbeta, nalpha) - np.vdot(spin_mat, spin_mat)\nprint('Spin Contamination Metric: %1.5E\\n' % spin_contam)\n\nprint('Final SCF energy: %.8f hartree' % SCF_E)\n\n# Compare to Psi4\nSCF_E_psi = psi4.energy('SCF')\npsi4.compare_values(SCF_E_psi, SCF_E, 6, 'SCF Energy')\n"
] | [
[
"numpy.set_printoptions",
"numpy.einsum"
],
[
"numpy.diag",
"numpy.dot",
"numpy.abs",
"numpy.einsum",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.linalg.norm",
"numpy.linalg.eigh",
"numpy.mean",
"numpy.vdot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KoconJan/BERT-NER-CLI | [
"6f1323bf6294bc05ee3ee9a58e5b932a68bb85c0"
] | [
"bert_ner.py"
] | [
"#! usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n\"\"\"\r\nCopyright 2018 The Google AI Language Team Authors.\r\nBASED ON Google_BERT.\r\n@Author:zhoukaiyin\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport collections\r\nimport os\r\nfrom bert import modeling\r\nfrom bert import optimization\r\nfrom bert import tokenization\r\nimport tensorflow as tf\r\nfrom sklearn.metrics import f1_score,precision_score,recall_score\r\nfrom tensorflow.python.ops import math_ops\r\nimport tf_metrics\r\n\r\nflags = tf.flags\r\n\r\nFLAGS = flags.FLAGS\r\n\r\nflags.DEFINE_string(\r\n \"data_dir\", './drive/My Drive/ai/NERdata',\r\n \"The input datadir.\",\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"bert_config_file\", './drive/My Drive/ai/checkpoint/bert_config.json',\r\n \"The config json file corresponding to the pre-trained BERT model.\"\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"task_name\", 'NER', \"The name of the task to train.\"\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"output_dir\", './drive/My Drive/ai/output/result_dir/',\r\n \"The output directory where the model checkpoints will be written.\"\r\n)\r\n\r\nflags.DEFINE_string(\r\n \"tpu_name\", 'gcp_tpu',\r\n \"Use Google Cloud Colaborator TPU to train\"\r\n)\r\n\r\n## Other parameters\r\nflags.DEFINE_string(\r\n \"init_checkpoint\", './drive/My Drive/ai/checkpoint/bert_model.ckpt',\r\n \"Initial checkpoint (usually from a pre-trained BERT model).\"\r\n)\r\n\r\nflags.DEFINE_bool(\r\n \"do_lower_case\", True,\r\n \"Whether to lower case the input text.\"\r\n)\r\n\r\nflags.DEFINE_integer(\r\n \"max_seq_length\", 128,\r\n \"The maximum total input sequence length after WordPiece tokenization.\"\r\n)\r\n\r\nflags.DEFINE_bool(\r\n \"do_train\", True,\r\n \"Whether to run training.\"\r\n)\r\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\r\n\r\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev 
set.\")\r\n\r\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\r\n\r\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\r\n\r\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\r\n\r\nflags.DEFINE_float(\"num_train_epochs\", 3.0, \"Total number of training epochs to perform.\")\r\n\r\nflags.DEFINE_float(\r\n \"warmup_proportion\", 0.1,\r\n \"Proportion of training to perform linear learning rate warmup for. \"\r\n \"E.g., 0.1 = 10% of training.\")\r\n\r\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\r\n \"How often to save the model checkpoint.\")\r\n\r\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\r\n \"How many steps to make in each estimator call.\")\r\n\r\nflags.DEFINE_string(\"vocab_file\", './drive/My Drive/ai/checkpoint/vocab.txt',\r\n \"The vocabulary file that the BERT model was trained on.\")\r\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\r\nflags.DEFINE_integer(\r\n \"num_tpu_cores\", 8,\r\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\r\n\r\nclass InputExample(object):\r\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\r\n\r\n def __init__(self, guid, text, label=None):\r\n \"\"\"Constructs a InputExample.\r\n\r\n Args:\r\n guid: Unique id for the example.\r\n text_a: string. The untokenized text of the first sequence. For single\r\n sequence tasks, only this sequence must be specified.\r\n label: (Optional) string. The label of the example. 
This should be\r\n specified for train and dev examples, but not for test examples.\r\n \"\"\"\r\n self.guid = guid\r\n self.text = text\r\n self.label = label\r\n\r\n\r\nclass InputFeatures(object):\r\n \"\"\"A single set of features of data.\"\"\"\r\n\r\n def __init__(self, input_ids, input_mask, segment_ids, label_ids):\r\n self.input_ids = input_ids\r\n self.input_mask = input_mask\r\n self.segment_ids = segment_ids\r\n self.label_ids = label_ids\r\n\r\n\r\nclass DataProcessor(object):\r\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\r\n\r\n def get_train_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_dev_examples(self, data_dir):\r\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_labels(self):\r\n \"\"\"Gets the list of labels for this data set.\"\"\"\r\n raise NotImplementedError()\r\n\r\n @classmethod\r\n def _read_data(cls, input_file):\r\n \"\"\"Reads a BIO data.\"\"\"\r\n with open(input_file) as f:\r\n lines = []\r\n words = []\r\n labels = []\r\n for line in f:\r\n contends = line.strip()\r\n word = line.strip().split(' ')[0]\r\n label = line.strip().split(' ')[-1]\r\n if contends.startswith(\"-DOCSTART-\"):\r\n words.append('')\r\n continue\r\n if len(contends) == 0 and words[-1] == '.':\r\n l = ' '.join([label for label in labels if len(label) > 0])\r\n w = ' '.join([word for word in words if len(word) > 0])\r\n lines.append([l, w])\r\n words = []\r\n labels = []\r\n continue\r\n words.append(word)\r\n labels.append(label)\r\n return lines\r\n\r\n\r\nclass NerProcessor(DataProcessor):\r\n def get_train_examples(self, data_dir):\r\n return self._create_example(\r\n self._read_data(os.path.join(data_dir, \"train.txt\")), \"train\"\r\n )\r\n\r\n def get_dev_examples(self, data_dir):\r\n return self._create_example(\r\n 
self._read_data(os.path.join(data_dir, \"dev.txt\")), \"dev\"\r\n )\r\n\r\n def get_labels(self):\r\n return [\"B-MISC\", \"I-MISC\", \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", \"X\"]\r\n\r\n def _create_example(self, lines, set_type):\r\n examples = []\r\n for (i, line) in enumerate(lines):\r\n guid = \"%s-%s\" % (set_type, i)\r\n text = tokenization.convert_to_unicode(line[1])\r\n label = tokenization.convert_to_unicode(line[0])\r\n examples.append(InputExample(guid=guid, text=text, label=label))\r\n return examples\r\n\r\n\r\ndef convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list, 1):\r\n label_map[label] = i\r\n textlist = example.text.split(' ')\r\n labellist = example.label.split(' ')\r\n tokens = []\r\n labels = []\r\n for i, word in enumerate(textlist):\r\n token = tokenizer.tokenize(word)\r\n tokens.extend(token)\r\n label_1 = labellist[i]\r\n for m in range(len(token)):\r\n if m == 0:\r\n labels.append(label_1)\r\n else:\r\n labels.append(\"X\")\r\n # tokens = tokenizer.tokenize(example.text)\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels = labels[0:(max_seq_length - 2)]\r\n ntokens = []\r\n segment_ids = []\r\n label_ids = []\r\n ntokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n label_ids.append(0)\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n segment_ids.append(0)\r\n label_ids.append(label_map[labels[i]])\r\n ntokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n label_ids.append(0)\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n label_ids.append(0)\r\n # print(len(input_ids))\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert 
len(segment_ids) == max_seq_length\r\n assert len(label_ids) == max_seq_length\r\n\r\n if ex_index < 5:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join(\r\n [tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label_ids: %s\" % \" \".join([str(x) for x in label_ids]))\r\n feature = InputFeatures(\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_ids=label_ids\r\n )\r\n return feature\r\n\r\n\r\ndef filed_based_convert_examples_to_features(\r\n examples, label_list, max_seq_length, tokenizer, output_file\r\n):\r\n writer = tf.python_io.TFRecordWriter(output_file)\r\n for (ex_index, example) in enumerate(examples):\r\n if ex_index % 5000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)\r\n\r\n def create_int_feature(values):\r\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\r\n return f\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\r\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\r\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\r\n features[\"label_ids\"] = create_int_feature(feature.label_ids)\r\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\r\n writer.write(tf_example.SerializeToString())\r\n\r\n\r\ndef file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):\r\n name_to_features = {\r\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\r\n 
\"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\r\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\r\n \"label_ids\": tf.FixedLenFeature([seq_length], tf.int64),\r\n }\r\n\r\n def _decode_record(record, name_to_features):\r\n example = tf.parse_single_example(record, name_to_features)\r\n for name in list(example.keys()):\r\n t = example[name]\r\n if t.dtype == tf.int64:\r\n t = tf.to_int32(t)\r\n example[name] = t\r\n return example\r\n\r\n def input_fn(params):\r\n batch_size = params[\"batch_size\"]\r\n d = tf.data.TFRecordDataset(input_file)\r\n if is_training:\r\n d = d.repeat()\r\n d = d.shuffle(buffer_size=100)\r\n d = d.apply(tf.contrib.data.map_and_batch(\r\n lambda record: _decode_record(record, name_to_features),\r\n batch_size=batch_size,\r\n drop_remainder=drop_remainder\r\n ))\r\n return d\r\n return input_fn\r\n\r\n\r\ndef create_model(bert_config, is_training, input_ids, input_mask,\r\n segment_ids, labels, num_labels, use_one_hot_embeddings):\r\n model = modeling.BertModel(\r\n config=bert_config,\r\n is_training=is_training,\r\n input_ids=input_ids,\r\n input_mask=input_mask,\r\n token_type_ids=segment_ids,\r\n use_one_hot_embeddings=use_one_hot_embeddings\r\n )\r\n\r\n output_layer = model.get_sequence_output()\r\n\r\n hidden_size = output_layer.shape[-1].value\r\n\r\n output_weight = tf.get_variable(\r\n \"output_weights\", [num_labels, hidden_size],\r\n initializer=tf.truncated_normal_initializer(stddev=0.02)\r\n )\r\n output_bias = tf.get_variable(\r\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer()\r\n )\r\n with tf.variable_scope(\"loss\"):\r\n if is_training:\r\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\r\n output_layer = tf.reshape(output_layer, [-1, hidden_size])\r\n logits = tf.matmul(output_layer, output_weight, transpose_b=True)\r\n logits = tf.nn.bias_add(logits, output_bias)\r\n logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11])\r\n log_probs = 
tf.nn.log_softmax(logits, axis=-1)\r\n\r\n # labels = tf.cast(labels,dtype=tf.float32)\r\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\r\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\r\n loss = tf.reduce_sum(per_example_loss)\r\n return (loss, per_example_loss, logits)\r\n\r\n\r\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\r\n num_train_steps, num_warmup_steps, use_tpu,\r\n use_one_hot_embeddings):\r\n def model_fn(features, labels, mode, params):\r\n tf.logging.info(\"*** Features ***\")\r\n for name in sorted(features.keys()):\r\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\r\n input_ids = features[\"input_ids\"]\r\n input_mask = features[\"input_mask\"]\r\n segment_ids = features[\"segment_ids\"]\r\n label_ids = features[\"label_ids\"]\r\n\r\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\r\n\r\n (total_loss, per_example_loss, logits) = create_model(\r\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\r\n num_labels, use_one_hot_embeddings)\r\n tvars = tf.trainable_variables()\r\n scaffold_fn = None\r\n if init_checkpoint:\r\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\r\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n if use_tpu:\r\n def tpu_scaffold():\r\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n return tf.train.Scaffold()\r\n scaffold_fn = tpu_scaffold\r\n else:\r\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n tf.logging.info(\"**** Trainable Variables ****\")\r\n\r\n for var in tvars:\r\n init_string = \"\"\r\n if var.name in initialized_variable_names:\r\n init_string = \", *INIT_FROM_CKPT*\"\r\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\r\n init_string)\r\n output_spec = None\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n train_op = 
optimization.create_optimizer(\r\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\r\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\r\n mode=mode,\r\n loss=total_loss,\r\n train_op=train_op,\r\n scaffold_fn=scaffold_fn)\r\n elif mode == tf.estimator.ModeKeys.EVAL:\r\n\r\n def metric_fn(per_example_loss, label_ids, logits):\r\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\r\n precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average=\"macro\")\r\n recall = tf_metrics.recall(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average=\"macro\")\r\n f = tf_metrics.f1(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average=\"macro\")\r\n loss = tf.metrics.mean(per_example_loss)\r\n return {\r\n \"eval_precision\":precision,\r\n \"eval_recall\":recall,\r\n \"eval_f\": f,\r\n \"eval_loss\": loss,\r\n }\r\n\r\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\r\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\r\n mode=mode,\r\n loss=total_loss,\r\n eval_metrics=eval_metrics,\r\n scaffold_fn=scaffold_fn)\r\n else:\r\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\r\n\r\n return output_spec\r\n\r\n return model_fn\r\n\r\n\r\ndef main(_):\r\n tf.logging.set_verbosity(tf.logging.INFO)\r\n processors = {\r\n \"ner\": NerProcessor\r\n }\r\n if not FLAGS.do_train and not FLAGS.do_eval:\r\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\r\n\r\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\r\n\r\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\r\n raise ValueError(\r\n \"Cannot use sequence length %d because the BERT model \"\r\n \"was only trained up to sequence length %d\" %\r\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\r\n\r\n task_name = FLAGS.task_name.lower()\r\n if task_name not in processors:\r\n raise ValueError(\"Task not found: %s\" % (task_name))\r\n processor = 
processors[task_name]()\r\n\r\n label_list = processor.get_labels()\r\n\r\n tokenizer = tokenization.FullTokenizer(\r\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\r\n tpu_cluster_resolver = None\r\n if FLAGS.use_tpu and FLAGS.tpu_name:\r\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR'])\r\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\r\n run_config = tf.contrib.tpu.RunConfig(\r\n cluster=tpu_cluster_resolver,\r\n master=FLAGS.master,\r\n model_dir=FLAGS.output_dir,\r\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\r\n tpu_config=tf.contrib.tpu.TPUConfig(\r\n iterations_per_loop=FLAGS.iterations_per_loop,\r\n num_shards=FLAGS.num_tpu_cores,\r\n per_host_input_for_training=is_per_host))\r\n\r\n train_examples = None\r\n num_train_steps = None\r\n num_warmup_steps = None\r\n if FLAGS.do_train:\r\n train_examples = processor.get_train_examples(FLAGS.data_dir)\r\n num_train_steps = int(\r\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\r\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\r\n model_fn = model_fn_builder(\r\n bert_config=bert_config,\r\n num_labels=len(label_list)+1,\r\n init_checkpoint=FLAGS.init_checkpoint,\r\n learning_rate=FLAGS.learning_rate,\r\n num_train_steps=num_train_steps,\r\n num_warmup_steps=num_warmup_steps,\r\n use_tpu=FLAGS.use_tpu,\r\n use_one_hot_embeddings=FLAGS.use_tpu)\r\n estimator = tf.contrib.tpu.TPUEstimator(\r\n use_tpu=FLAGS.use_tpu,\r\n model_fn=model_fn,\r\n config=run_config,\r\n train_batch_size=FLAGS.train_batch_size,\r\n eval_batch_size=FLAGS.eval_batch_size)\r\n\r\n if FLAGS.do_train:\r\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\r\n filed_based_convert_examples_to_features(\r\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\r\n tf.logging.info(\"***** Running training *****\")\r\n tf.logging.info(\" Num examples = %d\", 
len(train_examples))\r\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\r\n tf.logging.info(\" Num steps = %d\", num_train_steps)\r\n train_input_fn = file_based_input_fn_builder(\r\n input_file=train_file,\r\n seq_length=FLAGS.max_seq_length,\r\n is_training=True,\r\n drop_remainder=True)\r\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\r\n if FLAGS.do_eval:\r\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\r\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\r\n filed_based_convert_examples_to_features(\r\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\r\n\r\n tf.logging.info(\"***** Running evaluation *****\")\r\n tf.logging.info(\" Num examples = %d\", len(eval_examples))\r\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\r\n eval_steps = None\r\n if FLAGS.use_tpu:\r\n eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)\r\n eval_drop_remainder = True if FLAGS.use_tpu else False\r\n eval_input_fn = file_based_input_fn_builder(\r\n input_file=eval_file,\r\n seq_length=FLAGS.max_seq_length,\r\n is_training=False,\r\n drop_remainder=eval_drop_remainder)\r\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\r\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\r\n with open(output_eval_file, \"w\") as writer:\r\n tf.logging.info(\"***** Eval results *****\")\r\n for key in sorted(result.keys()):\r\n tf.logging.info(\" %s = %s\", key, str(result[key]))\r\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n tf.app.run()\r\n\r\n\r\n"
] | [
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.reduce_sum",
"tensorflow.train.init_from_checkpoint",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.nn.dropout",
"tensorflow.metrics.mean",
"tensorflow.matmul",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.train.Scaffold",
"tensorflow.flags.DEFINE_string",
"tensorflow.reshape",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
NSLS-II/bluesky | [
"b7d666e65cf4ef556fb46b744c33264c8e3f7507"
] | [
"bluesky/simulators.py"
] | [
"from warnings import warn\nfrom bluesky.utils import maybe_await\nfrom bluesky.preprocessors import print_summary_wrapper\nfrom bluesky.run_engine import call_in_bluesky_event_loop, in_bluesky_event_loop\nfrom .protocols import Checkable\n\n\ndef plot_raster_path(plan, x_motor, y_motor, ax=None, probe_size=None, lw=2):\n \"\"\"Plot the raster path for this plan\n\n Parameters\n ----------\n plan : iterable\n Must yield `Msg` objects and not be a co-routine\n\n x_motor, y_motor : str\n Names of the x and y motors\n\n ax : matplotlib.axes.Axes\n The axes to plot to, if none, make new figure + axes\n\n probe_size : float, optional\n If not None, use as radius of probe (in same units as motor positions)\n\n lw : float, optional\n Width of lines drawn between points\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib import collections as mcollections\n from matplotlib import patches as mpatches\n if ax is None:\n ax = plt.subplots()[1]\n ax.set_aspect('equal')\n\n cur_x = cur_y = None\n traj = []\n for msg in plan:\n cmd = msg.command\n if cmd == 'set':\n if msg.obj.name == x_motor:\n cur_x = msg.args[0]\n if msg.obj.name == y_motor:\n cur_y = msg.args[0]\n elif cmd == 'save':\n traj.append((cur_x, cur_y))\n\n x, y = zip(*traj)\n path, = ax.plot(x, y, marker='', linestyle='-', lw=lw)\n ax.set_xlabel(x_motor)\n ax.set_ylabel(y_motor)\n if probe_size is None:\n read_points = ax.scatter(x, y, marker='o', lw=lw)\n else:\n circles = [mpatches.Circle((_x, _y), probe_size,\n facecolor='black', alpha=0.5)\n for _x, _y in traj]\n\n read_points = mcollections.PatchCollection(circles,\n match_original=True)\n ax.add_collection(read_points)\n return {'path': path, 'events': read_points}\n\n\ndef summarize_plan(plan):\n \"\"\"Print summary of plan\n\n Prints a minimal version of the plan, showing only moves and\n where events are created.\n\n Parameters\n ----------\n plan : iterable\n Must yield `Msg` objects\n \"\"\"\n for msg in print_summary_wrapper(plan):\n 
...\n\n\nprint_summary = summarize_plan # back-compat\n\n\ndef check_limits(plan):\n \"\"\"Run check_limits_async in the RE\"\"\"\n if in_bluesky_event_loop():\n raise RuntimeError(\"Can't call check_limits() from within RE, use await check_limits_async() instead\")\n call_in_bluesky_event_loop(check_limits_async(plan))\n\n\nasync def check_limits_async(plan):\n \"\"\"\n Check that a plan will not move devices outside of their limits.\n\n Parameters\n ----------\n plan : iterable\n Must yield `Msg` objects\n \"\"\"\n ignore = []\n for msg in plan:\n obj = msg.obj\n if msg.command == 'set' and obj not in ignore:\n if isinstance(obj, Checkable):\n await maybe_await(obj.check_value(msg.args[0]))\n else:\n warn(f\"{obj.name} has no check_value() method\"\n f\" to check if {msg.args[0]} is within its limits.\")\n ignore.append(obj)\n"
] | [
[
"matplotlib.collections.PatchCollection",
"matplotlib.patches.Circle",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bglick13/multi-agent-emergence-environments | [
"e02d66f0734d95470d15a4508ff369a75fa093a4"
] | [
"ma_planning/ma_policy.py"
] | [
"import numpy as np\n\nfrom collections import deque\nfrom typing import Union\nfrom torch import nn, FloatTensor, LongTensor\nfrom torch.functional import F\nfrom torch.optim import Adam\nfrom torch.nn import CrossEntropyLoss\n\nfrom mae_envs.envs import DraftState\nfrom mcts import SearchNode, SearchProblem\n\n\n\nclass SwarmAgent():\n def __init__(self, model, env):\n self.model = model\n self.env = env\n self.macro_action = None\n\n def set_action(self, action):\n self.macro_action = action\n\n def act(self):\n return self.macro_action\n\n\nclass CaptainAgent():\n def __init__(self, model, env, agents):\n self.model = model\n self.best_model = model\n self.env = env\n self.agents = agents\n self.solver = None\n\n def simulate(self):\n leaf = self.solver.rollout()\n value = self.evaluate_leaf(leaf)\n self.solver.backup(leaf, value)\n return leaf\n\n def get_action(self, obs, num_reads=100, action=-1, random=False):\n if self.solver is None:\n self.root = SearchNode(obs, action)\n self.solver = SearchProblem(self.root)\n else:\n self.root = SearchNode(obs, action, self.root)\n self.solver.root = self.root\n\n leafs = []\n for _ in range(num_reads):\n leafs.append(self.simulate())\n\n action, value, values = self.root.best_child()\n successor, _, _, _ = env.step(action)\n nn_probs, nn_value = self.get_preds(successor)\n p = F.softmax(FloatTensor(values), -1).numpy()\n if random:\n action = np.random.choice(range(len(values)), p=p)\n else:\n top5 = values.argsort()[-5:]\n _p = F.softmax(FloatTensor(values[top5]), -1).numpy()\n action = np.random.choice(top5, p=_p)\n return action, values, p, nn_value, leafs\n\n def get_preds(self, obs):\n s_in = torch.FloatTensor(obs)\n s_in.requires_grad = False\n encoded_s = self.model.forward(s_in)\n probs = self.model.get_next_action_output(encoded_s) # n_agents x 3 x 11\n probs = F.softmax(torch.FloatTensor(probs), dim=2).detach().cpu().numpy()\n value = 
F.softmax(self.model.get_value_output(encoded_s)).detach().cpu().numpy()\n return probs, value\n\n def evaluate_leaf(self, leaf):\n probs, value = self.get_preds(leaf)\n if not leaf.is_terminal:\n leaf.expand(probs)\n return value"
] | [
[
"torch.FloatTensor",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
evamariaa/Eureka | [
"a3e739a528fbe85ec588bca996188765649b7778"
] | [
"eureka/S3_data_reduction/nirspec.py"
] | [
"# NIRSpec specific rountines go here\nimport os\nimport numpy as np\nfrom astropy.io import fits\nfrom . import sigrej, background, nircam\nfrom . import bright2flux as b2f\n\ndef read(filename, data, meta):\n '''Reads single FITS file from JWST's NIRCam instrument.\n\n Parameters\n ----------\n filename: str\n Single filename to read\n data: DataClass\n The data object in which the fits data will stored\n meta: MetaClass\n The metadata object\n\n Returns\n -------\n data: DataClass\n The updated data object with the fits data stored inside\n\n Notes\n -----\n History:\n\n - November 2012 Kevin Stevenson\n Initial version\n - June 2021 Aarynn Carter/Eva-Maria Ahrer\n Updated for NIRSpec\n '''\n\n assert isinstance(filename, str)\n\n # Decide whether to perform the Stage 2 processing ourselves.\n # if stage2_processing:\n # \t# Run pipeline on a *_rateints.fits Stage 1 data product, but avoiding significant subarray trimming.\n # \tstage2_filename = process_to_stage2(filename, do_assignwcs=do_assignwcs, do_extract2d=do_extract2d, do_srctype=do_srctype, do_flatfield=do_flatfield, do_photom=do_photom, delete_files=delete_files)\n # else:\n # \t# Use the input file as is.\n # \tstage2_filename = filename\n\n\n # Now we can start working with the data.\n hdulist \t\t= fits.open(filename)\n data.mhdr \t\t= hdulist[0].header\n data.shdr \t\t= hdulist['SCI',1].header\n\n data.intstart \t= 1\n print(' WARNING: Manually setting INTSTART to 1 for NIRSpec CV3 data.')\n #data.intstart = data.mhdr['INTSTART']\n data.intend \t= data.mhdr['NINTS']\n\n data.data \t\t= hdulist['SCI',1].data\n data.err \t\t= hdulist['ERR',1].data\n data.dq \t\t= hdulist['DQ',1].data\n data.wave \t\t= hdulist['WAVELENGTH',1].data\n data.v0 \t\t= hdulist['VAR_RNOISE',1].data\n data.int_times\t= hdulist['INT_TIMES',1].data[data.intstart-1:data.intend]\n\n # Record integration mid-times in BJD_TDB\n # data.bjdtdb = data.int_times['int_mid_BJD_TDB']\n # There is no time information in the simulated 
NIRSpec data\n print(' WARNING: The timestamps for the simulated NIRSpec data are currently '\n 'hardcoded because they are not in the .fits files themselves')\n data.bjdtdb = np.linspace(data.mhdr['EXPSTART'], data.mhdr['EXPEND'], data.intend)\n\n # NIRSpec CV3 data has a lot of NaNs in the data and err arrays, which is making life difficult.\n print(' WARNING: Manually changing NaNs from DATA and ERR arrays to 0 for the CV3 data')\n data.err[np.where(np.isnan(data.err))] = np.inf\n data.data[np.where(np.isnan(data.data))] = 0\n\n return data, meta\n\n\ndef flag_bg(data, meta):\n '''Outlier rejection of sky background along time axis.\n\n Uses the code written for NIRCam and untested for NIRSpec, but likely to still work\n\n Parameters\n ----------\n data: DataClass\n The data object in which the fits data will stored\n meta: MetaClass\n The metadata object\n\n Returns\n -------\n data: DataClass\n The updated data object with outlier background pixels flagged.\n '''\n return nircam.flag_bg(data, meta)\n\n\ndef fit_bg(data, meta, mask, y1, y2, bg_deg, p3thresh, n, isplots=False):\n '''Fit for a non-uniform background.\n\n Uses the code written for NIRCam and untested for NIRSpec, but likely to still work\n '''\n return nircam.fit_bg(data, meta, mask, y1, y2, bg_deg, p3thresh, n, isplots=isplots)\n"
] | [
[
"numpy.isnan",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lvwj19/PPR-Net- | [
"e5d305b39a1fa453fb3f58ed51468008e7bfa5a3"
] | [
"pprnet/utils/visualize_util.py"
] | [
"import os\nimport numpy as np\nimport random\nimport h5py\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, '..'))\nimport show3d_balls \n\ndef show_points(point_array, color_array=None, radius=3):\n assert isinstance(point_array, list)\n all_color = None\n if color_array is not None:\n if color_array == 'random':\n color_array = [np.array([random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]) for i in range(len(point_array))]\n assert len(point_array) == len(color_array)\n all_color = [ np.zeros( [ pnts.shape[0] ,3] ) for pnts in point_array]\n \n for i, c in enumerate(color_array):\n c=c.tolist()\n all_color[i][:] = [c[1],c[0],c[2]]\n all_color = np.concatenate(all_color, axis=0)\n all_points = np.concatenate(point_array, axis=0)\n show3d_balls.showpoints(all_points, c_gt=all_color, ballradius=radius)\n\ndef show_models(model_pc, trans, rot_mat, cls_idx, color_array=None, radius=3):\n assert len(trans) == len(rot_mat) == len(cls_idx)\n all_points = []\n all_color = [] if color_array is not None else None\n if color_array == 'random':\n color_array = [ [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)] for i in range(len(cls_idx))]\n for i in range(len(cls_idx)):\n model_pc_transformed = np.dot(model_pc[cls_idx[i]], rot_mat[i].T) + \\\n np.tile(np.reshape(trans[i], [1, 3]), [model_pc[cls_idx[i]].shape[0], 1])\n all_points.append(model_pc_transformed)\n colors_tmp = np.tile(np.array(color_array[i]).reshape(1,3).astype(np.float32), [model_pc_transformed.shape[0], 1])\n if all_color is not None:\n all_color.append(colors_tmp)\n \n all_points = np.concatenate(all_points, axis=0)\n if all_color is not None:\n all_color = np.concatenate(all_color, axis=0)\n show3d_balls.showpoints(all_points, c_gt=all_color, ballradius=radius)\n\ndef get_models_points(model_pc, trans, rot_mat, cls_idx):\n assert len(trans) == len(rot_mat) == 
len(cls_idx)\n all_points = []\n for i in range(len(cls_idx)):\n model_pc_transformed = np.dot(model_pc[cls_idx[i]], rot_mat[i].T) + \\\n np.tile(np.reshape(trans[i], [1, 3]), [model_pc[cls_idx[i]].shape[0], 1])\n all_points.append(model_pc_transformed)\n all_points = np.concatenate(all_points, axis=0)\n return all_points\n\n \n"
] | [
[
"numpy.dot",
"numpy.reshape",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vgutta/AMPL | [
"46759aa84fd6acfc14facad0e14cb05a43d2e309"
] | [
"atomsci/ddm/pipeline/model_wrapper.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nContains class ModelWrapper and its subclasses, which are wrappers for DeepChem and scikit-learn model classes.\n\"\"\"\n\nimport logging\nimport os\nimport shutil\nimport joblib\nimport pdb\n\nimport deepchem as dc\nimport numpy as np\nimport tensorflow as tf\nif dc.__version__.startswith('2.1'):\n from deepchem.models.tensorgraph.fcnet import MultitaskRegressor, MultitaskClassifier\nelse:\n from deepchem.models.fcnet import MultitaskRegressor, MultitaskClassifier\nfrom collections import OrderedDict\nimport torch\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import RandomForestRegressor\n\ntry:\n import xgboost as xgb\n xgboost_supported = True\nexcept ImportError:\n xgboost_supported = False\n\nimport pickle\nimport yaml\nimport glob\nfrom datetime import datetime\nimport time\nimport socket\nfrom packaging import version\n\nfrom atomsci.ddm.utils import datastore_functions as dsf\nfrom atomsci.ddm.utils import llnl_utils\nfrom atomsci.ddm.pipeline import transformations as trans\nfrom atomsci.ddm.pipeline import perf_data as perf\n\nlogging.basicConfig(format='%(asctime)-15s %(message)s')\n\ndef dc_restore(model, checkpoint=None, model_dir=None, session=None):\n \"\"\"Reload the values of all variables from a checkpoint file.\n\n copied from DeepChem 2.3 keras_model.py to silence warnings caused\n when a model is loaded in inference mode.\n\n Args:\n model (DeepChem.KerasModel: keras model to restore\n\n checkpoint (str): the path to the checkpoint file to load. If this is None, the most recent\n checkpoint will be chosen automatically. Call get_checkpoints() to get a\n list of all available checkpoints.\n\n model_dir (str): default None\n Directory to restore checkpoint from. If None, use model.model_dir.\n\n session (tf.Session()) default None\n Session to run restore ops under. 
If None, model.session is used.\n\n Returns:\n None\n \"\"\"\n model._ensure_built()\n if model_dir is None:\n model_dir = model.model_dir\n if checkpoint is None:\n checkpoint = tf.train.latest_checkpoint(model_dir)\n if checkpoint is None:\n raise ValueError('No checkpoint found')\n if tf.executing_eagerly():\n # expect_partial() silences warnings when this model is restored for\n # inference only.\n model._checkpoint.restore(checkpoint).expect_partial()\n else:\n if session is None:\n session = model.session\n # expect_partial() silences warnings when this model is restored for\n # inference only.\n model._checkpoint.restore(checkpoint).expect_partial().run_restore_ops(session)\n\n# ****************************************************************************************\ndef create_model_wrapper(params, featurizer, ds_client=None):\n \"\"\"Factory function for creating Model objects of the correct subclass for params.model_type.\n\n Args:\n params (Namespace): Parameters passed to the model pipeline\n\n featurizer (Featurization): Object managing the featurization of compounds\n\n ds_client (DatastoreClient): Interface to the file datastore\n\n Returns:\n model (pipeline.Model): Wrapper for DeepChem, sklearn or other model.\n\n Raises:\n ValueError: Only params.model_type = 'NN', 'RF' or 'xgboost' is supported.\n \"\"\"\n if params.model_type == 'NN':\n return DCNNModelWrapper(params, featurizer, ds_client)\n elif params.model_type == 'RF':\n return DCRFModelWrapper(params, featurizer, ds_client)\n elif params.model_type == 'xgboost':\n if not xgboost_supported:\n raise Exception(\"Unable to import xgboost. \\\n xgboost package needs to be installed to use xgboost model. 
\\\n Installatin: \\\n from pip: pip3 install xgboost==0.90.\\\n livermore compute (lc): /usr/mic/bio/anaconda3/bin/pip install xgboost==0.90 --user \\\n twintron-blue (TTB): /opt/conda/bin/pip install xgboost==0.90 --user/ \\ \"\n )\n elif version.parse(xgb.__version__) < version.parse('0.9'):\n raise Exception(f\"xgboost required to be = 0.9 for GPU support. \\\n current version = xgb.__version__ \\\n installation: \\\n from pip: pip install xgboost==0.90\")\n else:\n return DCxgboostModelWrapper(params, featurizer, ds_client)\n elif params.model_type == 'hybrid':\n return HybridModelWrapper(params, featurizer, ds_client)\n else:\n raise ValueError(\"Unknown model_type %s\" % params.model_type)\n\n# ****************************************************************************************\n\nclass ModelWrapper(object):\n \"\"\"Wrapper for DeepChem and sklearn model objects. Provides methods to train and test a model,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n set in setup_model_dirs:\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n\n \"\"\"\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes ModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurization object): initialized outside of model_wrapper\n\n ds_client (DatastoreClient): Interface to the file datastore\n\n Side effects:\n Sets the following attributes of ModelWrapper:\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n \"\"\"\n self.params = params\n self.featurization = featurizer\n self.ds_client = ds_client\n self.log = logging.getLogger('ATOM')\n self.output_dir = self.params.output_dir\n self.model_dir = os.path.join(self.output_dir, 'model')\n os.makedirs(self.model_dir, exist_ok=True)\n self.transformers = []\n self.transformers_x = []\n\n # ****************************************************************************************\n\n def setup_model_dirs(self):\n \"\"\"Sets up paths and directories for persisting models at particular training epochs, used by\n the DeepChem model classes.\n\n Side effects:\n Sets the following attributes of ModelWrapper:\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs
        """
        self.best_model_dir = os.path.join(self.output_dir, 'best_model')

    # ****************************************************************************************

    def train(self, pipeline):
        """Trains a model (for multiple epochs if applicable), and saves the tuned model.

        Args:
            pipeline (ModelPipeline): The ModelPipeline instance for this model run.

        Raises:
            NotImplementedError: The method is implemented by subclasses
        """
        raise NotImplementedError

    # ****************************************************************************************

    def get_model_specific_metadata(self):
        """Returns a dictionary of parameter settings for this ModelWrapper object that are specific
        to the model type.

        Raises:
            NotImplementedError: The method is implemented by subclasses
        """
        raise NotImplementedError
    # ****************************************************************************************

    def create_transformers(self, model_dataset):
        """
        Initialize transformers for responses and/or features, and persist them for later.

        Args:
            model_dataset: The ModelDataset object that handles the current dataset

        Side effects
            Overwrites the attributes:
                transformers: A list of deepchem transformation objects on response_col, only if conditions are met

                transformers_x: A list of deepchem transformation objects on featurizers, only if conditions are met.

                params.transformer_key: A string pointing to the dataset key containing the transformer in the datastore, or the path to the transformer

        """
        # TODO: Just a warning, we may have response transformers for classification datasets in the future
        # Response transformers are only created for regression runs with transformers enabled;
        # otherwise self.transformers stays [] (as initialized in __init__).
        if self.params.prediction_type=='regression' and self.params.transformers==True:
            # self.transformers = [
            #    dc.trans.NormalizationTransformer(transform_y=True, dataset=model_dataset.dataset)]
            # Hybrid models get a dedicated normalization transformer; all others use the
            # missing-data-aware variant.
            if self.params.model_type != "hybrid":
                self.transformers = [trans.NormalizationTransformerMissingData(transform_y=True, dataset=model_dataset.dataset)]
            else:
                self.transformers = [trans.NormalizationTransformerHybrid(transform_y=True, dataset=model_dataset.dataset)]

        # Set up transformers for features, if needed
        self.transformers_x = trans.create_feature_transformers(self.params, model_dataset)

        if len(self.transformers) > 0 or len(self.transformers_x) > 0:

            # Transformers are no longer saved as separate datastore objects; they are included in the model tarball
            self.params.transformer_key = os.path.join(self.output_dir, 'transformers.pkl')
            pickle.dump((self.transformers, self.transformers_x), open(self.params.transformer_key, 'wb'))
            self.log.info("Wrote transformers to %s" % self.params.transformer_key)
            self.params.transformer_oid = ""
            self.params.transformer_bucket = ""

    # ****************************************************************************************

    def reload_transformers(self):
        """
        Load response and feature transformers from datastore objects or files. 
Before AMPL v1.2 these
        were persisted as separate datastore objects when the model tracker was used; subsequently they
        are included in model tarballs, which should have been unpacked before this function gets called.
        """

        # Try local path first to check for transformers unpacked from model tarball
        if not trans.transformers_needed(self.params):
            return
        local_path = f"{self.output_dir}/transformers.pkl"
        if os.path.exists(local_path):
            self.log.info(f"Reloading transformers from model tarball {local_path}")
            # NOTE(review): pickle.load on the tarball contents — assumes model archives are
            # from a trusted source; confirm before loading third-party models.
            self.transformers, self.transformers_x = pickle.load(open(local_path, 'rb'))
        else:
            if self.params.transformer_key is not None:
                if self.params.save_results:
                    self.log.info(f"Reloading transformers from datastore key {self.params.transformer_key}")
                    self.transformers, self.transformers_x = dsf.retrieve_dataset_by_datasetkey(
                        dataset_key = self.params.transformer_key,
                        bucket = self.params.transformer_bucket,
                        client = self.ds_client )
                else:
                    self.log.info(f"Reloading transformers from file {self.params.transformer_key}")
                    self.transformers, self.transformers_x = pickle.load(open( self.params.transformer_key, 'rb' ))
            else:
                # Shouldn't happen
                raise Exception("Transformers needed to reload model, but no transformer_key specified.")


    # ****************************************************************************************

    def transform_dataset(self, dataset):
        """
        Transform the responses and/or features in the given DeepChem dataset using the current transformers.

        Args:
            dataset: The DeepChem DiskDataset that contains a dataset

        Returns:
            transformed_dataset: The transformed DeepChem DiskDataset

        """
        # Response transformers first, then feature transformers; each is applied in list order.
        transformed_dataset = dataset
        if len(self.transformers) > 0:
            self.log.info("Transforming response data")
            for transformer in self.transformers:
                transformed_dataset = transformer.transform(transformed_dataset)
        if len(self.transformers_x) > 0:
            self.log.info("Transforming feature data")
            for transformer in self.transformers_x:
                transformed_dataset = transformer.transform(transformed_dataset)

        return transformed_dataset
    # ****************************************************************************************

    def get_num_features(self):
        """Returns the number of dimensions of the feature space, taking both featurization method
        and transformers into account.
        """
        # A UMAP feature transform replaces the raw feature space with umap_dim dimensions.
        if self.params.feature_transform_type == 'umap':
            return self.params.umap_dim
        else:
            return self.featurization.get_feature_count()

    # ****************************************************************************************

    def get_train_valid_pred_results(self, perf_data):
        """Returns predicted values and metrics for the training, validation or test set
        associated with the PerfData object perf_data. Results are returned as a dictionary 
        of parameter, value pairs in the format expected by the model tracker.

        Args:
            perf_data: A PerfData object that stores the predicted values and metrics

        Returns:
            dict: A dictionary of the prediction results

        """
        return perf_data.get_prediction_results()

    # ****************************************************************************************
    def get_test_perf_data(self, model_dir, model_dataset):
        """Returns the predicted values and metrics for the current test dataset against
        the version of the model stored in model_dir, as a PerfData object.

        Args:
            model_dir (str): Directory where the saved model is stored
            model_dataset (DiskDataset): Stores the current dataset and related methods

        Returns:
            perf_data: PerfData object containing the predicted values and metrics for the current test dataset
        """
        # Load the saved model from model_dir
        self.reload_model(model_dir)

        # Create a PerfData object, which knows how to format the prediction results in the structure
        # expected by the model tracker.

        # We pass transformed=False to indicate that the preds 
and uncertainties we get from\n # generate_predictions are already untransformed, so that perf_data.get_prediction_results()\n # doesn't untransform them again.\n if hasattr(self.transformers[0], \"ishybrid\"):\n # indicate that we are training a hybrid model\n perf_data = perf.create_perf_data(\"hybrid\", model_dataset, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)\n else:\n perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'test', transformed=False)\n test_dset = model_dataset.test_dset\n test_preds, test_stds = self.generate_predictions(test_dset)\n _ = perf_data.accumulate_preds(test_preds, test_dset.ids, test_stds)\n return perf_data\n\n # ****************************************************************************************\n def get_test_pred_results(self, model_dir, model_dataset):\n \"\"\"Returns predicted values and metrics for the current test dataset against the version\n of the model stored in model_dir, as a dictionary in the format expected by the model tracker.\n\n Args:\n model_dir (str): Directory where the saved model is stored\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n dict: A dictionary containing the prediction values and metrics for the current dataset.\n \"\"\"\n perf_data = self.get_test_perf_data(model_dir, model_dataset)\n return perf_data.get_prediction_results()\n\n # ****************************************************************************************\n def get_full_dataset_perf_data(self, model_dataset):\n \"\"\"Returns the predicted values and metrics from the current model for the full current dataset,\n as a PerfData object.\n\n Args:\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n perf_data: PerfData object containing the predicted values and metrics for the current full dataset\n \"\"\"\n\n # Create a PerfData 
object, which knows how to format the prediction results in the structure\n # expected by the model tracker.\n\n # We pass transformed=False to indicate that the preds and uncertainties we get from\n # generate_predictions are already untransformed, so that perf_data.get_prediction_results()\n # doesn't untransform them again.\n if hasattr(self.transformers[0], \"ishybrid\"):\n # indicate that we are training a hybrid model\n perf_data = perf.create_perf_data(\"hybrid\", model_dataset, self.transformers, 'full', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)\n else:\n perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'full', transformed=False)\n full_preds, full_stds = self.generate_predictions(model_dataset.dataset)\n _ = perf_data.accumulate_preds(full_preds, model_dataset.dataset.ids, full_stds)\n return perf_data\n\n # ****************************************************************************************\n def get_full_dataset_pred_results(self, model_dataset):\n \"\"\"Returns predicted values and metrics from the current model for the full current dataset,\n as a dictionary in the format expected by the model tracker.\n\n Args:\n model_dataset (DiskDataset): Stores the current dataset and related methods\n\n Returns:\n dict: A dictionary containing predicted values and metrics for the current full dataset\n\n \"\"\"\n self.data = model_dataset\n perf_data = self.get_full_dataset_perf_data(model_dataset)\n return perf_data.get_prediction_results()\n\n def generate_predictions(self, dataset):\n \"\"\"\n\n Args:\n dataset:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n def reload_model(self, reload_dir):\n \"\"\"\n\n Args:\n reload_dir:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n\n # ****************************************************************************************\n def model_save(self):\n \"\"\"A wrapper function to save a model due to the 
`DeepChem model.save()` has inconsistent implementation.\n\n The `SKlearnModel()` class and xgboost model in DeepChem use `model.save()`,\n while the `MultitaskRegressor` class uses `model.save_checkpoint()`. The\n workaround is to try `model.save()` first. If failed, then try `model.save_checkpoint()`\n \"\"\"\n try:\n self.model.save()\n except Exception as error:\n try:\n self.model.save_checkpoint()\n except Exception as e:\n self.log.error(\"Error when saving model:\\n%s\" % str(e))\n\n\n# ****************************************************************************************\nclass DCNNModelWrapper(ModelWrapper):\n \"\"\"Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n\n g: The tensorflow graph object\n\n sess: The tensor flow graph session\n\n model: The dc.models.GraphConvModel, MultitaskRegressor, or MultitaskClassifier object, as specified by the params attribute\n\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (np.array of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (np.array of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the training dataset\n\n valid_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the validation dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes DCNNModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurizer object): initialized outside of model_wrapper\n\n Side effects:\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n\n featurziation (Featurization object): The featurization object created outside of model_wrapper\n\n log (log): The logger\n\n output_dir (str): The parent path of the model directory\n\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n\n g: The tensorflow graph object\n\n sess: The tensor flow graph session\n\n model: The dc.models.GraphConvModel, MultitaskRegressor, or MultitaskClassifier object, as specified by the params 
attribute


        """
        super().__init__(params, featurizer, ds_client)
        self.g = tf.Graph()
        self.sess = tf.compat.v1.Session(graph=self.g)
        n_features = self.get_num_features()
        self.num_epochs_trained = 0

        if self.params.featurizer == 'graphconv':

            # Set defaults for layer sizes and dropouts, if not specified by caller. Note that
            # these depend on the featurizer used.

            if self.params.layer_sizes is None:
                self.params.layer_sizes = [64, 64, 128]
            if self.params.dropouts is None:
                # Nonzero dropout defaults when uncertainty is requested — presumably for
                # dropout-based uncertainty estimation; confirm against DeepChem docs.
                if self.params.uncertainty:
                    self.params.dropouts = [0.25] * len(self.params.layer_sizes)
                else:
                    self.params.dropouts = [0.0] * len(self.params.layer_sizes)

            # TODO: Need to check that GraphConvModel params are actually being used
            # All but the last layer size are graph convolution layers; the last is the dense layer.
            self.model = dc.models.GraphConvModel(
                self.params.num_model_tasks,
                batch_size=self.params.batch_size,
                learning_rate=self.params.learning_rate,
                learning_rate_decay_time=1000,
                optimizer_type=self.params.optimizer_type,
                beta1=0.9,
                beta2=0.999,
                model_dir=self.model_dir,
                mode=self.params.prediction_type,
                tensorboard=False,
                uncertainty=self.params.uncertainty,
                graph_conv_layers=self.params.layer_sizes[:-1],
                dense_layer_size=self.params.layer_sizes[-1],
                dropout=self.params.dropouts,
                penalty=self.params.weight_decay_penalty,
                penalty_type=self.params.weight_decay_penalty_type)

        else:
            # Set defaults for layer sizes and dropouts, if not specified by caller. Note that
            # default layer sizes depend on the featurizer used.

            if self.params.layer_sizes is None:
                if self.params.featurizer == 'ecfp':
                    self.params.layer_sizes = [1000, 500]
                elif self.params.featurizer in ['descriptors', 'computed_descriptors']:
                    self.params.layer_sizes = [200, 100]
                else:
                    # Shouldn't happen
                    self.log.warning("You need to define default layer sizes for featurizer %s" %
                                     self.params.featurizer)
                    self.params.layer_sizes = [1000, 500]

            if self.params.dropouts is None:
                self.params.dropouts = [0.4] * len(self.params.layer_sizes)
            if self.params.weight_init_stddevs is None:
                self.params.weight_init_stddevs = [0.02] * len(self.params.layer_sizes)
            if self.params.bias_init_consts is None:
                self.params.bias_init_consts = [1.0] * len(self.params.layer_sizes)

            if self.params.prediction_type == 'regression':

                # TODO: Need to check that MultitaskRegressor params are actually being used
                self.model = MultitaskRegressor(
                    self.params.num_model_tasks,
                    n_features,
                    layer_sizes=self.params.layer_sizes,
                    dropouts=self.params.dropouts,
                    weight_init_stddevs=self.params.weight_init_stddevs,
                    bias_init_consts=self.params.bias_init_consts,
                    learning_rate=self.params.learning_rate,
                    weight_decay_penalty=self.params.weight_decay_penalty,
                    weight_decay_penalty_type=self.params.weight_decay_penalty_type,
                    batch_size=self.params.batch_size,
                    seed=123,
                    verbosity='low',
                    model_dir=self.model_dir,
                    learning_rate_decay_time=1000,
                    beta1=0.9,
                    beta2=0.999,
                    mode=self.params.prediction_type,
                    tensorboard=False,
                    uncertainty=self.params.uncertainty)

                # print("JEA debug",self.params.num_model_tasks,n_features,self.params.layer_sizes,self.params.weight_init_stddevs,self.params.bias_init_consts,self.params.dropouts,self.params.weight_decay_penalty,self.params.weight_decay_penalty_type,self.params.batch_size,self.params.learning_rate)
                # self.model = MultitaskRegressor(
                #     self.params.num_model_tasks,
                #     n_features,
                #
layer_sizes=self.params.layer_sizes,
                #     weight_init_stddevs=self.params.weight_init_stddevs,
                #     bias_init_consts=self.params.bias_init_consts,
                #     dropouts=self.params.dropouts,
                #     weight_decay_penalty=self.params.weight_decay_penalty,
                #     weight_decay_penalty_type=self.params.weight_decay_penalty_type,
                #     batch_size=self.params.batch_size,
                #     learning_rate=self.params.learning_rate,
                #     seed=123)

            else:
                # TODO: Need to check that MultitaskClassifier params are actually being used
                self.model = MultitaskClassifier(
                    self.params.num_model_tasks,
                    n_features,
                    layer_sizes=self.params.layer_sizes,
                    dropouts=self.params.dropouts,
                    weight_init_stddevs=self.params.weight_init_stddevs,
                    bias_init_consts=self.params.bias_init_consts,
                    learning_rate=self.params.learning_rate,
                    weight_decay_penalty=self.params.weight_decay_penalty,
                    weight_decay_penalty_type=self.params.weight_decay_penalty_type,
                    batch_size=self.params.batch_size,
                    seed=123,
                    verbosity='low',
                    model_dir=self.model_dir,
                    learning_rate_decay_time=1000,
                    beta1=.9,
                    beta2=.999,
                    mode=self.params.prediction_type,
                    tensorboard=False,
                    n_classes=self.params.class_number)

    # ****************************************************************************************
    def recreate_model(self):
        """
        Creates a new DeepChem Model object of the correct type for the requested featurizer and prediction type 
        and returns it.

        Note: mirrors the model construction in __init__; by this point the layer size and
        dropout defaults have already been filled in by __init__.
        """
        if self.params.featurizer == 'graphconv':
            # All but the last layer size are graph convolution layers; the last is the dense layer.
            model = dc.models.GraphConvModel(
                self.params.num_model_tasks,
                batch_size=self.params.batch_size,
                learning_rate=self.params.learning_rate,
                learning_rate_decay_time=1000,
                optimizer_type=self.params.optimizer_type,
                beta1=0.9,
                beta2=0.999,
                model_dir=self.model_dir,
                mode=self.params.prediction_type,
                tensorboard=False,
                uncertainty=self.params.uncertainty,
                graph_conv_layers=self.params.layer_sizes[:-1],
                dense_layer_size=self.params.layer_sizes[-1],
                dropout=self.params.dropouts,
                penalty=self.params.weight_decay_penalty,
                penalty_type=self.params.weight_decay_penalty_type)

        else:
            n_features = self.get_num_features()
            if self.params.prediction_type == 'regression':
                model = MultitaskRegressor(
                    self.params.num_model_tasks,
                    n_features,
                    layer_sizes=self.params.layer_sizes,
                    dropouts=self.params.dropouts,
                    weight_init_stddevs=self.params.weight_init_stddevs,
                    bias_init_consts=self.params.bias_init_consts,
                    learning_rate=self.params.learning_rate,
                    weight_decay_penalty=self.params.weight_decay_penalty,
                    weight_decay_penalty_type=self.params.weight_decay_penalty_type,
                    batch_size=self.params.batch_size,
                    seed=123,
                    verbosity='low',
                    model_dir=self.model_dir,
                    learning_rate_decay_time=1000,
                    beta1=0.9,
                    beta2=0.999,
                    mode=self.params.prediction_type,
                    tensorboard=False,
                    uncertainty=self.params.uncertainty)
            else:
                model = MultitaskClassifier(
                    self.params.num_model_tasks,
                    n_features,
                    layer_sizes=self.params.layer_sizes,
                    dropouts=self.params.dropouts,
                    weight_init_stddevs=self.params.weight_init_stddevs,
                    bias_init_consts=self.params.bias_init_consts,
                    learning_rate=self.params.learning_rate,
                    weight_decay_penalty=self.params.weight_decay_penalty,
                    weight_decay_penalty_type=self.params.weight_decay_penalty_type,
                    batch_size=self.params.batch_size,
                    seed=123,
                    verbosity='low',
                    model_dir=self.model_dir,
                    learning_rate_decay_time=1000,
                    beta1=.9,
                    beta2=.999,
                    mode=self.params.prediction_type,
                    tensorboard=False,
                    n_classes=self.params.class_number)

        return model

    # ****************************************************************************************
    def train(self, pipeline):
        """Trains a neural net model for multiple epochs, choose the epoch with the best validation
        set performance, refits the model for that number of epochs, and saves the tuned model.

        Args:
            pipeline (ModelPipeline): The ModelPipeline instance for this model run.

        Side effects:
            Sets the following 
attributes for DCNNModelWrapper:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (list of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (list of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the training dataset\n\n valid_epoch_perfs (np.array of dicts): Initialized as an empty array,\n contains a list of dictionaries of predicted values and metrics on the validation dataset\n \"\"\"\n # TODO: Fix docstrings above\n num_folds = len(pipeline.data.train_valid_dsets)\n if num_folds > 1:\n self.train_kfold_cv(pipeline)\n else:\n self.train_with_early_stopping(pipeline)\n\n # ****************************************************************************************\n def train_with_early_stopping(self, pipeline):\n \"\"\"Trains a neural net model for up to self.params.max_epochs epochs, while tracking the validation\n set metric given by params.model_choice_score_type. Saves a model checkpoint each time the metric\n is improved over its previous saved value by more than a threshold percentage. If the metric fails to\n improve for more than a specified 'patience' number of epochs, stop training and revert the model state\n to the last saved checkpoint. 
\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Side effects:\n Sets the following attributes for DCNNModelWrapper:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n best_validation_score (float): The best validation model choice score attained during training.\n\n train_perf_data (list of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (list of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array): A standard training set performance metric (r2_score or roc_auc), at the end of each epoch.\n\n valid_epoch_perfs (np.array): A standard validation set performance metric (r2_score or roc_auc), at the end of each epoch.\n \"\"\"\n self.data = pipeline.data\n self.best_epoch = 0\n self.best_valid_score = None\n self.early_stopping_min_improvement = self.params.early_stopping_min_improvement\n self.early_stopping_patience = self.params.early_stopping_patience\n self.train_epoch_perfs = np.zeros(self.params.max_epochs)\n self.valid_epoch_perfs = np.zeros(self.params.max_epochs)\n self.test_epoch_perfs = np.zeros(self.params.max_epochs)\n self.train_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.valid_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.test_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.model_choice_scores = np.zeros(self.params.max_epochs)\n\n self.train_perf_data = []\n self.valid_perf_data = []\n self.test_perf_data = []\n\n for ei in range(self.params.max_epochs):\n self.train_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'train'))\n self.valid_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 
'valid'))\n self.test_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test'))\n\n test_dset = pipeline.data.test_dset\n\n time_limit = int(self.params.slurm_time_limit)\n training_start = time.time()\n\n train_dset, valid_dset = pipeline.data.train_valid_dsets[0]\n for ei in range(self.params.max_epochs):\n if llnl_utils.is_lc_system() and (ei > 0):\n # If we're running on an LC system, check that we have enough time to complete another epoch\n # before the current job finishes, by extrapolating from the time elapsed so far.\n\n now = time.time() \n elapsed_time = now - pipeline.start_time\n training_time = now - training_start\n time_remaining = time_limit * 60 - elapsed_time\n time_needed = training_time/ei\n\n if time_needed > 0.9 * time_remaining:\n self.log.warn(\"Projected time to finish one more epoch exceeds time left in job; cutting training to %d epochs\" %\n ei)\n self.params.max_epochs = ei\n break\n\n # Train the model for one epoch. 
We turn off automatic checkpointing, so the last checkpoint\n # saved will be the one we created intentionally when we reached a new best validation score.\n self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0)\n train_pred = self.model.predict(train_dset, [])\n valid_pred = self.model.predict(valid_dset, [])\n test_pred = self.model.predict(test_dset, [])\n\n train_perf = self.train_perf_data[ei].accumulate_preds(train_pred, train_dset.ids)\n valid_perf = self.valid_perf_data[ei].accumulate_preds(valid_pred, valid_dset.ids)\n test_perf = self.test_perf_data[ei].accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n # Compute performance metrics for each subset, and check if we've reached a new best validation set score\n\n self.train_epoch_perfs[ei], _ = self.train_perf_data[ei].compute_perf_metrics()\n self.valid_epoch_perfs[ei], _ = self.valid_perf_data[ei].compute_perf_metrics()\n self.test_epoch_perfs[ei], _ = self.test_perf_data[ei].compute_perf_metrics()\n valid_score = self.valid_perf_data[ei].model_choice_score(self.params.model_choice_score_type)\n self.model_choice_scores[ei] = valid_score\n self.num_epochs_trained = ei + 1\n if self.best_valid_score is None:\n self.model.save_checkpoint()\n self.best_valid_score = valid_score\n self.best_epoch = ei\n elif valid_score - self.best_valid_score > self.early_stopping_min_improvement:\n # Save a new checkpoint\n self.model.save_checkpoint()\n self.best_valid_score = valid_score\n self.best_epoch = ei\n elif ei - self.best_epoch > self.early_stopping_patience:\n self.log.info(f\"No improvement after {self.early_stopping_patience} epochs, stopping training\")\n break\n\n # Revert to last checkpoint\n dc_restore(self.model)\n self.model_save()\n\n # Only copy the model files we need, not the entire directory\n 
self._copy_model(self.best_model_dir)\n self.log.info(f\"Best model from epoch {self.best_epoch} saved to {self.best_model_dir}\")\n\n\n\n # ****************************************************************************************\n def train_kfold_cv(self, pipeline):\n \"\"\"Trains a neural net model with K-fold cross-validation for a specified number of epochs.\n Finds the epoch with the best validation set performance averaged over folds, then refits \n a model for the same number of epochs to the combined training and validation data.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Side effects:\n Sets the following attributes for DCNNModelWrapper:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score\n\n train_perf_data (list of PerfData): Initialized as an empty array, \n contains the predictions and performance of the training dataset\n\n valid_perf_data (list of PerfData): Initialized as an empty array,\n contains the predictions and performance of the validation dataset\n\n train_epoch_perfs (np.array): Contains a standard training set performance metric (r2_score or roc_auc), averaged over folds,\n at the end of each epoch.\n\n valid_epoch_perfs (np.array): Contains a standard validation set performance metric (r2_score or roc_auc), averaged over folds,\n at the end of each epoch.\n \"\"\"\n # TODO: Fix docstrings above\n num_folds = len(pipeline.data.train_valid_dsets)\n self.data = pipeline.data\n self.best_epoch = 0\n self.best_valid_score = None\n self.train_epoch_perfs = np.zeros(self.params.max_epochs)\n self.valid_epoch_perfs = np.zeros(self.params.max_epochs)\n self.test_epoch_perfs = np.zeros(self.params.max_epochs)\n self.train_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.valid_epoch_perf_stds = np.zeros(self.params.max_epochs)\n self.test_epoch_perf_stds = 
np.zeros(self.params.max_epochs)\n self.model_choice_scores = np.zeros(self.params.max_epochs)\n self.early_stopping_min_improvement = self.params.early_stopping_min_improvement\n self.early_stopping_patience = self.params.early_stopping_patience\n\n\n # Create PerfData structures for computing cross-validation metrics\n self.valid_perf_data = []\n for ei in range(self.params.max_epochs):\n self.valid_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid'))\n\n test_dset = pipeline.data.test_dset\n\n time_limit = int(self.params.slurm_time_limit)\n training_start = time.time()\n\n # Train a separate model for each fold\n models = []\n for k in range(num_folds):\n models.append(self.recreate_model())\n\n for ei in range(self.params.max_epochs):\n \n if llnl_utils.is_lc_system() and (ei > 0):\n # If we're running on an LC system, check that we have enough time to complete another epoch\n # across all folds, plus rerun the training, before the current job finishes, by \n # extrapolating from the time elapsed so far.\n \n now = time.time() \n elapsed_time = now - pipeline.start_time\n training_time = now - training_start\n time_remaining = time_limit * 60 - elapsed_time\n\n # epochs_remaining is how many epochs we have to run if we do one more across all folds,\n # then do self.best_epoch+1 epochs on the combined training & validation set, allowing for the\n # possibility that the next epoch may be the best one.\n\n epochs_remaining = ei + 2\n time_per_epoch = training_time/ei\n time_needed = epochs_remaining * time_per_epoch\n \n if time_needed > 0.9 * time_remaining:\n self.log.warn('Projected time to finish one more epoch exceeds time left in job; cutting training to %d epochs' % ei)\n self.params.max_epochs = ei\n break\n\n\n # Create PerfData structures that are only used within loop to compute metrics during initial training\n train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 
self.transformers, 'train')\n test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')\n for k in range(num_folds):\n self.model = models[k]\n train_dset, valid_dset = pipeline.data.train_valid_dsets[k]\n\n # We turn off automatic checkpointing - we only want to save a checkpoints for the final model.\n self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0, restore=False)\n train_pred = self.model.predict(train_dset, [])\n valid_pred = self.model.predict(valid_dset, [])\n test_pred = self.model.predict(test_dset, [])\n\n train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids)\n valid_perf = self.valid_perf_data[ei].accumulate_preds(valid_pred, valid_dset.ids)\n test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Fold %d, epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n k, ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n # Compute performance metrics for current epoch across validation sets for all folds, and update\n # the best_epoch and best score if the new score exceeds the previous best score by a specified\n # threshold.\n\n self.valid_epoch_perfs[ei], self.valid_epoch_perf_stds[ei] = self.valid_perf_data[ei].compute_perf_metrics()\n valid_score = self.valid_perf_data[ei].model_choice_score(self.params.model_choice_score_type)\n self.model_choice_scores[ei] = valid_score\n self.num_epochs_trained = ei + 1\n if self.best_valid_score is None:\n self.best_valid_score = valid_score\n self.best_epoch = ei\n self.log.info(f\"Total cross-validation score for epoch {ei} is {valid_score:.3}\")\n elif valid_score - self.best_valid_score > self.early_stopping_min_improvement:\n self.best_valid_score = valid_score\n self.best_epoch = ei\n self.log.info(f\"*** Total cross-validation score for epoch {ei} is {valid_score:.3}, is new maximum\")\n elif ei - self.best_epoch > 
self.early_stopping_patience:\n self.log.info(f\"No improvement after {self.early_stopping_patience} epochs, stopping training\")\n break\n else:\n self.log.info(f\"Total cross-validation score for epoch {ei} is {valid_score:.3}\")\n\n # Train a new model for best_epoch epochs on the combined training/validation set. Compute the training and test\n # set metrics at each epoch.\n fit_dataset = pipeline.data.combined_training_data()\n retrain_start = time.time()\n self.model = self.recreate_model()\n self.log.info(f\"Best epoch was {self.best_epoch}, retraining with combined training/validation set\")\n\n self.train_perf_data = []\n self.test_perf_data = []\n for ei in range(self.best_epoch+1):\n self.train_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'train_valid'))\n self.test_perf_data.append(perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test'))\n\n self.model.fit(fit_dataset, nb_epoch=1, checkpoint_interval=0, restore=False)\n train_pred = self.model.predict(fit_dataset, [])\n test_pred = self.model.predict(test_dset, [])\n train_perf = self.train_perf_data[ei].accumulate_preds(train_pred, fit_dataset.ids)\n test_perf = self.test_perf_data[ei].accumulate_preds(test_pred, test_dset.ids)\n self.log.info(f\"Combined folds: Epoch {ei}, training {pipeline.metric_type} = {train_perf:.3},\"\n + f\"test {pipeline.metric_type} = {test_perf:.3}\")\n self.train_epoch_perfs[ei], self.train_epoch_perf_stds[ei] = self.train_perf_data[ei].compute_perf_metrics()\n self.test_epoch_perfs[ei], self.test_epoch_perf_stds[ei] = self.test_perf_data[ei].compute_perf_metrics()\n self.model.save_checkpoint()\n self.model_save()\n\n # Only copy the model files we need, not the entire directory\n self._copy_model(self.best_model_dir)\n retrain_time = time.time() - retrain_start\n self.log.info(\"Time to retrain model for %d epochs: %.1f seconds, %.1f sec/epoch\" % (self.best_epoch, retrain_time, 
\n retrain_time/self.best_epoch))\n\n # ****************************************************************************************\n def _copy_model(self, dest_dir):\n \"\"\"Copies the files needed to recreate a DeepChem NN model from the current model\n directory to a destination directory.\n\n Args:\n dest_dir (str): The destination directory for the model files\n \"\"\"\n\n chkpt_file = os.path.join(self.model_dir, 'checkpoint')\n with open(chkpt_file, 'r') as chkpt_in:\n chkpt_dict = yaml.load(chkpt_in.read())\n chkpt_prefix = chkpt_dict['model_checkpoint_path']\n files = [chkpt_file]\n # files.append(os.path.join(self.model_dir, 'model.pickle'))\n files.append(os.path.join(self.model_dir, '%s.index' % chkpt_prefix))\n # files.append(os.path.join(self.model_dir, '%s.meta' % chkpt_prefix))\n files = files + glob.glob(os.path.join(self.model_dir, '%s.data-*' % chkpt_prefix))\n self._clean_up_excess_files(dest_dir)\n for file in files:\n shutil.copy2(file, dest_dir)\n self.log.info(\"Saved model files to '%s'\" % dest_dir)\n\n\n # ****************************************************************************************\n def reload_model(self, reload_dir):\n \"\"\"Loads a saved neural net model from the specified directory.\n\n Args:\n reload_dir (str): Directory where saved model is located.\n model_dataset (ModelDataset Object): contains the current full dataset\n\n Side effects:\n Resets the value of model, transformers, and transformers_x\n \"\"\"\n if self.params.featurizer == 'graphconv':\n self.model = dc.models.GraphConvModel(\n n_tasks=self.params.num_model_tasks,\n n_features=self.get_num_features(),\n batch_size=self.params.batch_size,\n model_dir=reload_dir,\n uncertainty=self.params.uncertainty,\n graph_conv_layers=self.params.layer_sizes[:-1],\n dense_layer_size=self.params.layer_sizes[-1],\n dropout=self.params.dropouts,\n learning_rate=self.params.learning_rate,\n mode=self.params.prediction_type)\n elif self.params.prediction_type == 'regression':\n 
self.model = MultitaskRegressor(\n self.params.num_model_tasks,\n n_features=self.get_num_features(),\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n model_dir=reload_dir,\n learning_rate=self.params.learning_rate,\n uncertainty=self.params.uncertainty)\n else:\n self.model = MultitaskClassifier(\n self.params.num_model_tasks,\n n_features=self.get_num_features(),\n layer_sizes=self.params.layer_sizes,\n dropouts=self.params.dropouts,\n weight_init_stddevs=self.params.weight_init_stddevs,\n bias_init_consts=self.params.bias_init_consts,\n weight_decay_penalty=self.params.weight_decay_penalty,\n weight_decay_penalty_type=self.params.weight_decay_penalty_type,\n model_dir=reload_dir,\n learning_rate=self.params.learning_rate,\n n_classes=self.params.class_number)\n # Hack to run models trained in DeepChem 2.1 with DeepChem 2.2\n # self.model.default_outputs = self.model.outputs\n # Get latest checkpoint path transposed to current model dir\n ckpt = tf.train.get_checkpoint_state(reload_dir)\n if os.path.exists(f\"{ckpt.model_checkpoint_path}.index\"):\n checkpoint = ckpt.model_checkpoint_path\n else:\n checkpoint = os.path.join(reload_dir, os.path.basename(ckpt.model_checkpoint_path))\n dc_restore(self.model, checkpoint=checkpoint)\n\n\n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n\n\n # ****************************************************************************************\n def get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset. subset may be 'train', 'valid', 'test'\n accordingly. 
epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. Results are returned as a dictionary of parameter, value pairs.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n dict: A dictionary of parameter/ value pairs of the prediction values and results of the dataset subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n if subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data[epoch])\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data[epoch])\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data[epoch])\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset. subset may be 'train', 'valid', 'test' or 'full',\n epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. 
Results are returned as a PerfData object of the appropriate class \n for the model's split strategy and prediction type.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n PerfData object: Performance object pulled from the appropriate subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n\n if subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n\n if subset == 'train':\n return self.train_perf_data[epoch]\n elif subset == 'valid':\n return self.valid_perf_data[epoch]\n elif subset == 'test':\n #return self.get_test_perf_data(model_dir, self.data)\n return self.test_perf_data[epoch]\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified dataset with current model, as well as standard deviations\n if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): tuple of predictions for compounds and standard deviation estimates, if requested.\n Each element of tuple is a numpy array of shape (ncmpds, ntasks, nclasses), where nclasses = 1 for regression\n models.\n \"\"\"\n pred, std = None, None\n self.log.info(\"Predicting values for current model\")\n\n # For deepchem's predict_uncertainty function, you are not allowed to specify transformers. That means that the\n # predictions are being made in the transformed space, not the original space. 
We call undo_transforms() to generate
        # the transformed predictions. To transform the standard deviations, we rely on the fact that at present we only use
        # dc.trans.NormalizationTransformer (which centers and scales the data).

        # Uncertainty is now supported by DeepChem's GraphConv, at least for regression models.
        # if self.params.uncertainty and self.params.prediction_type == 'regression' and self.params.featurizer != 'graphconv':

        # Current (2.1) DeepChem neural net classification models don't support uncertainties.
        if self.params.uncertainty and self.params.prediction_type == 'classification':
            self.log.warning("Warning: DeepChem neural net models support uncertainty for regression only.")
        
        if self.params.uncertainty and self.params.prediction_type == 'regression':
            # For multitask, predict_uncertainty returns a list of (pred, std) tuples, one for each task.
            # For singletask, it returns one tuple. Convert the result into a pair of ndarrays of shape (ncmpds, ntasks, nclasses).
            pred_std = self.model.predict_uncertainty(dataset)
            if type(pred_std) == tuple:
                # Single-task case: one (pred, std) pair; insert a task axis of size 1.
                #JEA
                #ntasks = 1
                ntasks = len(pred_std[0][0])
                pred, std = pred_std
                pred = pred.reshape((pred.shape[0], 1, pred.shape[1]))
                std = std.reshape(pred.shape)
            else:
                # Multitask case: stack the per-task arrays along a new task axis.
                # NOTE(review): std0 is unused; output shapes are derived from pred0 only.
                ntasks = len(pred_std)
                pred0, std0 = pred_std[0]
                ncmpds = pred0.shape[0]
                nclasses = pred0.shape[1]
                pred = np.concatenate([p.reshape((ncmpds, 1, nclasses)) for p, s in pred_std], axis=1)
                std = np.concatenate([s.reshape((ncmpds, 1, nclasses)) for p, s in pred_std], axis=1)

            if self.params.transformers and self.transformers is not None:
                # Transform the standard deviations, if we can. This is a bit of a hack, but it works for
                # NormalizationTransformer, since the standard deviations used to scale the data are
                # stored in the transformer object.
                if len(self.transformers) == 1 and (isinstance(self.transformers[0], dc.trans.NormalizationTransformer) or isinstance(self.transformers[0],trans.NormalizationTransformerMissingData)):
                    y_stds = self.transformers[0].y_stds.reshape((1,ntasks,1))
                    std = std / y_stds
                pred = dc.trans.undo_transforms(pred, self.transformers)
        elif self.params.transformers and self.transformers is not None:
            # No uncertainty requested: predict and let DeepChem undo the response transforms.
            pred = self.model.predict(dataset, self.transformers)
            if self.params.prediction_type == 'regression':
                # Add a trailing class axis of size 1 so regression output matches (ncmpds, ntasks, 1).
                pred = pred.reshape((pred.shape[0], pred.shape[1], 1))
        else:
            pred = self.model.predict(dataset, [])
            if self.params.prediction_type == 'regression':
                pred = pred.reshape((pred.shape[0], pred.shape[1], 1))
        return pred, std

    # ****************************************************************************************
    def get_model_specific_metadata(self):
        """Returns a dictionary of parameter settings for this ModelWrapper object that are specific
        to neural network models.

        Returns:
            model_spec_metadata (dict): A dictionary of the parameter sets for the DCNNModelWrapper object.
            Parameters are saved under the key 'nn_specific' as a subdictionary.
        """
        nn_metadata = dict(
            best_epoch = self.best_epoch,
            max_epochs = self.params.max_epochs,
            batch_size = self.params.batch_size,
            optimizer_type = self.params.optimizer_type,
            layer_sizes = self.params.layer_sizes,
            dropouts = self.params.dropouts,
            weight_init_stddevs = self.params.weight_init_stddevs,
            bias_init_consts = self.params.bias_init_consts,
            learning_rate = self.params.learning_rate,
            weight_decay_penalty=self.params.weight_decay_penalty,
            weight_decay_penalty_type=self.params.weight_decay_penalty_type
        )
        model_spec_metadata = dict(nn_specific = nn_metadata)
        return model_spec_metadata

    # 
****************************************************************************************
    def _clean_up_excess_files(self, dest_dir):
        """
        Function to clean up extra model files left behind in the training process.
        Removes dest_dir if it exists, then recreates it empty.
        """
        if os.path.exists(dest_dir):
            shutil.rmtree(dest_dir)
        os.mkdir(dest_dir)
        
# ****************************************************************************************
class HybridModelWrapper(ModelWrapper):
    """A wrapper for hybrid models, contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,
    generate predictions for an input dataset, and generate performance metrics for these predictions.

    Attributes:
        Set in __init__
        params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information
        featurization (Featurization object): The featurization object created outside of model_wrapper

        log (log): The logger

        output_dir (str): The parent path of the model directory

        transformers (list): Initialized as an empty list, stores the transformers on the response col

        transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers

        model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.

        best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs

        model: The PyTorch NN sequential model.
        Created in train:
            data (ModelDataset): contains the dataset, set in pipeline

            best_epoch (int): Initialized as None, keeps track of the epoch with the best validation score

            train_perf_data (np.array of PerfData): Initialized as an empty array, 
            contains the predictions and performance of the training dataset

            valid_perf_data (np.array of PerfData): Initialized as an empty array,
            contains the predictions and performance of the validation dataset

            train_epoch_perfs (np.array of dicts): Initialized as an empty array,
            contains a list of dictionaries of predicted values and metrics on the training dataset

            valid_epoch_perfs (np.array of dicts): Initialized as an empty array,
            contains a list of dictionaries of predicted values and metrics on the validation dataset

    """

    def __init__(self, params, featurizer, ds_client):
        """Initializes HybridModelWrapper object.

        Args:
            params (Namespace object): contains all parameter information.

            featurizer (Featurizer object): initialized outside of model_wrapper

        Side effects:
            params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information

            featurization (Featurization object): The featurization object created outside of model_wrapper

            log (log): The logger

            output_dir (str): The parent path of the model directory

            transformers (list): Initialized as an empty list, stores the transformers on the response col

            transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers

            model: dc.models.TorchModel
        """
        super().__init__(params, featurizer, ds_client)
        # Fall back to featurizer-specific default layer sizes when none were given.
        if self.params.layer_sizes is None:
            if self.params.featurizer == 'ecfp':
                self.params.layer_sizes = [1000, 500]
            elif self.params.featurizer in ['descriptors', 'computed_descriptors']:
                self.params.layer_sizes = [200, 100]
            else:
                # Shouldn't happen
                self.log.warning("You need to define default layer sizes for featurizer %s" %
                                 self.params.featurizer)
                self.params.layer_sizes = [1000, 500]

        if self.params.dropouts is None:
            # One dropout rate per hidden layer.
            self.params.dropouts = [0.4] * len(self.params.layer_sizes)

        n_features = self.get_num_features()
        # NOTE(review): hosts whose names start with "sur" are forced onto CPU —
        # presumably machines without usable CUDA devices; confirm the hostname convention.
        if socket.gethostname()[:3] == "sur":
            self.dev = torch.device("cpu")
        else:
            self.dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

        if self.params.prediction_type == 'regression':
            # Build a named-layer OrderedDict: (Linear -> Dropout -> ReLU) per hidden layer,
            # finished by a single-output linear head. The dict is kept on self so it can be
            # serialized with checkpoints and used to rebuild the net in reload_model().
            model_dict = OrderedDict([
                ("layer1", torch.nn.Linear(n_features, self.params.layer_sizes[0]).to(self.dev)),
                ("dp1", torch.nn.Dropout(p=self.params.dropouts[0]).to(self.dev)),
                ("relu1", torch.nn.ReLU().to(self.dev))
            ])
            
            if len(self.params.layer_sizes) > 1:
                for i in range(1, len(self.params.layer_sizes)):
                    model_dict[f"layer{i+1}"] = torch.nn.Linear(self.params.layer_sizes[i-1], self.params.layer_sizes[i]).to(self.dev)
                    model_dict[f"dp{i+1}"] = torch.nn.Dropout(p=self.params.dropouts[i]).to(self.dev)
                    model_dict[f"relu{i+1}"] = torch.nn.ReLU().to(self.dev)
            
            model_dict["last_layer"] = torch.nn.Linear(self.params.layer_sizes[-1], 1).to(self.dev)
            
            self.model_dict = model_dict
            self.model = torch.nn.Sequential(model_dict).to(self.dev)
        else:
            raise Exception("Hybrid model only support regression prediction.")

    def _predict_binding(self, activity, conc):
        """
        Predict measurements of fractional binding/inhibition of target receptors by a compound with the given activity,
        in -Log scale, at the specified concentration in nM. If the given activity is pKi, a ratio to convert Ki into IC50
        is needed. 
It can be the ratio of concentration and Kd of the radioligand in a competitive binding assay, or the concentration\n of the substrate and Michaelis constant (Km) of enzymatic inhibition assay.\n \"\"\"\n \n if self.params.is_ki:\n if self.params.ki_convert_ratio is None:\n raise Exception(\"Ki converting ratio is missing. Cannot convert Ki into IC50\")\n Ki = 10**(9-activity)\n IC50 = Ki * (1 + self.params.ki_convert_ratio)\n else:\n IC50 = 10**(9-activity)\n pred_frac = 1.0/(1.0 + IC50/conc)\n \n return pred_frac\n\n def _l2_loss(self, yp, yr):\n \"\"\"\n Da's loss function, based on L2 terms for both pKi and percent binding values\n This function is not appropriate for model fitting, but can be used for R^2 calculation.\n \"\"\"\n yreal = yr.to(\"cpu\").numpy()\n pos_ki = np.where(np.isnan(yreal[:,1]))[0]\n pos_bind = np.where(~np.isnan(yreal[:,1]))[0]\n loss_ki = torch.sum((yp[pos_ki, 0] - yr[pos_ki, 0]) ** 2)\n if len(pos_bind[0]) == 0:\n return loss_ki, torch.tensor(0.0, dtype=torch.float32)\n # convert Ki to % binding\n y_stds = self.transformers[0].y_stds\n y_means = self.transformers[0].y_means\n if self.params.is_ki:\n bind_pred = self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])\n else:\n bind_pred = self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])\n # calculate the loss_bind\n loss_bind = torch.sum((bind_pred - yr[pos_bind, 0]) ** 2)\n return loss_ki, loss_bind\n\n def _poisson_hybrid_loss(self, yp, yr):\n \"\"\"\n Hybrid loss function based on L2 losses for deviations of predicted and measured pKi values\n and Poisson losses for predicted vs measured binding values. The idea is to choose loss terms\n that when minimized maximize the likelihood.\n\n Note that we compute both pKi and binding loss terms for compounds that have both kinds of data, since they are\n independent measurements. 
Therefore, pos_ki and pos_bind index sets may overlap.
        """

        # Get indices of non-missing pKi values
        yreal = yr.to("cpu").numpy()
        pos_ki = np.where(np.isnan(yreal[:,1]))[0]
        # Get indices of non-missing binding values
        pos_bind = np.where(~np.isnan(yreal[:,1]))[0]

        # Compute L2 loss for pKi predictions
        loss_ki = torch.sum((yp[pos_ki, 0] - yr[pos_ki, 0]) ** 2)
        #convert the ki prediction back to Ki scale
        y_stds = self.transformers[0].y_stds
        y_means = self.transformers[0].y_means
        # Compute fraction bound to *radioligand* (not drug) from predicted pKi
        # NOTE(review): both branches below are identical; _predict_binding already
        # branches on params.is_ki internally.
        if self.params.is_ki:
            rl_bind_pred = 1 - self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])
        else:
            rl_bind_pred = 1 - self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1])
        rl_bind_real = 1 - yr[pos_bind, 0]
        # Compute Poisson loss for radioligand binding
        loss_bind = torch.sum(rl_bind_pred - rl_bind_real * torch.log(rl_bind_pred))

        # Fail fast if either term degenerated (e.g. log of a non-positive prediction).
        if np.isnan(loss_ki.item()):
            raise Exception("Ki loss is NaN")
        if np.isnan(loss_bind.item()):
            raise Exception("Binding loss is NaN")
        return loss_ki, loss_bind

    def _loss_batch(self, loss_func, xb, yb, opt=None):
        """
        Compute loss_func for the batch xb, yb. 
If opt is provided, perform a training
        step on the model weights.
        """
        # NOTE(review): the loss_func parameter is ignored; self.loss_func (set in train())
        # is always used instead — confirm whether the parameter should be honored.
        loss_ki, loss_bind = self.loss_func(self.model(xb), yb)
        loss = loss_ki + loss_bind
        
        if opt is not None:
            # Training step: backprop the combined loss and update the weights.
            loss.backward()
            opt.step() 
            opt.zero_grad()

        return loss_ki.item(), loss_bind.item(), len(xb)

    class SubsetData(object):
        """
        Container for DataLoader object and attributes of a dataset subset
        """
        def __init__(self, ds, dl, n_ki, n_bind):
            # ds: TensorDataset; dl: DataLoader; n_ki / n_bind: counts of rows carrying
            # pKi-only data and binding data, used to normalize per-epoch losses in train().
            self.ds = ds
            self.dl = dl
            self.n_ki = n_ki
            self.n_bind = n_bind
        
    def _tensorize(self, x):
        # Convert an array-like to a float32 torch tensor.
        return torch.tensor(x, dtype=torch.float32)

    def _load_hybrid_data(self, data):
        """
        Convert the DeepChem dataset into the SubsetData for hybrid model.
        """
        self.train_valid_dsets = []
        test_dset = data.test_dset
        num_folds = len(data.train_valid_dsets)

        for k in range(num_folds):
            train_dset, valid_dset = data.train_valid_dsets[k]
            # datasets were normalized in previous steps
            x_train, y_train, x_valid, y_valid = map(
                self._tensorize, (train_dset.X, train_dset.y, valid_dset.X, valid_dset.y)
            )
            # Rows with NaN in column 1 carry pKi-only data; the rest carry binding data.
            # train
            train_ki_pos = np.where(np.isnan(y_train[:,1].numpy()))[0]
            train_bind_pos = np.where(~np.isnan(y_train[:,1].numpy()))[0]
            
            # valid
            valid_ki_pos = np.where(np.isnan(y_valid[:,1].numpy()))[0]
            valid_bind_pos = np.where(~np.isnan(y_valid[:,1].numpy()))[0]
            
            train_ds = TensorDataset(x_train, y_train)
            train_dl = DataLoader(train_ds, batch_size=self.params.batch_size, shuffle=True, pin_memory=True)
            train_data = self.SubsetData(train_ds, 
                                         train_dl, 
                                         len(train_ki_pos), 
                                         len(train_bind_pos))

            valid_ds = TensorDataset(x_valid, y_valid)
            valid_dl = DataLoader(valid_ds, batch_size=self.params.batch_size * 2, pin_memory=True)
            valid_data = self.SubsetData(valid_ds, 
                                         valid_dl, 
                                         len(valid_ki_pos), 
                                         len(valid_bind_pos))

            self.train_valid_dsets.append((train_data, valid_data))

        x_test, y_test = map(
            self._tensorize, (test_dset.X, test_dset.y)
        )
        test_ki_pos = np.where(np.isnan(y_test[:,1].numpy()))[0]
        test_bind_pos = np.where(~np.isnan(y_test[:,1].numpy()))[0]

        test_ds = TensorDataset(x_test, y_test)
        test_dl = DataLoader(test_ds, batch_size=self.params.batch_size * 2, pin_memory=True)
        test_data = self.SubsetData(test_ds, 
                                    test_dl, 
                                    len(test_ki_pos), 
                                    len(test_bind_pos))

        self.test_data = test_data
        
    def save_model(self, checkpoint_file, model, opt, epoch, model_dict):
        """
        Save a model to a checkpoint file.
        Include epoch, model_dict in checkpoint dict.
        """
        checkpoint = dict(
            epoch=epoch,
            model_state_dict=model.state_dict(),
            opt_state_dict=opt.state_dict(),
            model_dict=model_dict
        )
        
        torch.save(checkpoint, checkpoint_file)

    def train(self, pipeline):
        """Train the hybrid (pKi + binding) model, checkpointing at each new best validation score."""
        self.best_epoch = 0
        self.best_valid_score = None
        self.train_epoch_perfs = np.zeros(self.params.max_epochs)
        self.valid_epoch_perfs = np.zeros(self.params.max_epochs)
        self.test_epoch_perfs = np.zeros(self.params.max_epochs)
        self.train_epoch_perf_stds = np.zeros(self.params.max_epochs)
        self.valid_epoch_perf_stds = np.zeros(self.params.max_epochs)
        self.test_epoch_perf_stds = np.zeros(self.params.max_epochs)
        self.model_choice_scores = np.zeros(self.params.max_epochs)
        self.early_stopping_min_improvement = self.params.early_stopping_min_improvement
        self.early_stopping_patience = self.params.early_stopping_patience

        # Select the loss used by _loss_batch for this run.
        if self.params.loss_func.lower() == "poisson":
            self.loss_func = self._poisson_hybrid_loss
        else:
            self.loss_func = self._l2_loss

        # load hybrid data
        self._load_hybrid_data(pipeline.data)

        checkpoint_file = os.path.join(self.model_dir, f"{self.params.dataset_name}_model_{self.params.model_uuid}.pt")

        opt = torch.optim.Adam(self.model.parameters(), lr=self.params.learning_rate)
        self.train_perf_data = []
        self.valid_perf_data = []
        self.test_perf_data = []
        # One PerfData per epoch and subset, used to accumulate per-epoch metrics.
        for ei in range(self.params.max_epochs):
            self.train_perf_data.append(perf.create_perf_data("hybrid", pipeline.data, self.transformers, 'train', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio))
            self.valid_perf_data.append(perf.create_perf_data("hybrid", pipeline.data, self.transformers, 'valid', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio))
            self.test_perf_data.append(perf.create_perf_data("hybrid", pipeline.data, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio))

        test_data = self.test_data

        time_limit = int(self.params.slurm_time_limit)
        training_start = time.time()

        train_dset, valid_dset = pipeline.data.train_valid_dsets[0]
        if len(pipeline.data.train_valid_dsets) > 1:
            raise Exception("Currently the hybrid model doesn't support K-fold cross validation splitting.")
        test_dset = pipeline.data.test_dset
        train_data, valid_data = self.train_valid_dsets[0]
        for ei in range(self.params.max_epochs):
            if llnl_utils.is_lc_system() and (ei > 0):
                # If we're running on an LC system, check that we have enough time to complete another epoch
                # before the current job finishes, by extrapolating from the time elapsed so far.

                now = time.time() 
                elapsed_time = now - pipeline.start_time
                training_time = now - training_start
                time_remaining = time_limit * 60 - elapsed_time
                time_needed = training_time/ei

                if time_needed > 0.9 * time_remaining:
                    self.log.warn("Projected time to finish one more epoch exceeds time left in job; cutting training to %d epochs" %
                                    ei)
                    self.params.max_epochs = ei
                    break

            # Train the model for one epoch. 
We turn off automatic checkpointing, so the last checkpoint
            # saved will be the one we created intentionally when we reached a new best validation score.
            train_loss_ep = 0
            self.model.train()
            for i, (xb, yb) in enumerate(train_data.dl):
                xb = xb.to(self.dev)
                yb = yb.to(self.dev)
                train_loss_ki, train_loss_bind, train_count = self._loss_batch(self.loss_func, xb, yb, opt)
                train_loss_ep += (train_loss_ki + train_loss_bind)
            # Normalize the accumulated loss by the total sample count of the subset.
            train_loss_ep /= (train_data.n_ki + train_data.n_bind)

            # validation set
            with torch.no_grad():
                valid_loss_ep = 0
                for xb, yb in valid_data.dl:
                    xb = xb.to(self.dev)
                    yb = yb.to(self.dev)
                    valid_loss_ki, valid_loss_bind, valid_count = self._loss_batch(self.loss_func, xb, yb)
                    valid_loss_ep += (valid_loss_ki + valid_loss_bind)
                valid_loss_ep /= (valid_data.n_ki + valid_data.n_bind)

            train_pred, _ = self.generate_predictions(train_dset)
            valid_pred, _ = self.generate_predictions(valid_dset)
            test_pred, _ = self.generate_predictions(test_dset)

            train_perf = self.train_perf_data[ei].accumulate_preds(train_pred, train_dset.ids)
            valid_perf = self.valid_perf_data[ei].accumulate_preds(valid_pred, valid_dset.ids)
            test_perf = self.test_perf_data[ei].accumulate_preds(test_pred, test_dset.ids)
            self.log.info("Epoch %d: training %s = %.3f, training loss = %.3f, validation %s = %.3f, validation loss = %.3f, test %s = %.3f" % (
                ei, pipeline.metric_type, train_perf, train_loss_ep, pipeline.metric_type, valid_perf, valid_loss_ep,
                pipeline.metric_type, test_perf))

            # Compute performance metrics for each subset, and check if we've reached a new best validation set score

            self.train_epoch_perfs[ei], _ = self.train_perf_data[ei].compute_perf_metrics()
            self.valid_epoch_perfs[ei], _ = self.valid_perf_data[ei].compute_perf_metrics()
            self.test_epoch_perfs[ei], _ = self.test_perf_data[ei].compute_perf_metrics()
            valid_score = self.valid_perf_data[ei].model_choice_score(self.params.model_choice_score_type)
            self.model_choice_scores[ei] = valid_score
            self.num_epochs_trained = ei + 1
            # Checkpoint whenever the validation score improves by more than the early-stopping
            # threshold; stop once no improvement has been seen for early_stopping_patience epochs.
            if self.best_valid_score is None:
                self.save_model(checkpoint_file, self.model, opt, ei, self.model_dict)
                self.best_valid_score = valid_score
                self.best_epoch = ei
            elif valid_score - self.best_valid_score > self.early_stopping_min_improvement:
                # Save a new checkpoint
                self.save_model(checkpoint_file, self.model, opt, ei, self.model_dict)
                self.best_valid_score = valid_score
                self.best_epoch = ei
            elif ei - self.best_epoch > self.early_stopping_patience:
                self.log.info(f"No improvement after {self.early_stopping_patience} epochs, stopping training")
                break

        # Revert to last checkpoint
        checkpoint = torch.load(checkpoint_file)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        opt.load_state_dict(checkpoint['opt_state_dict'])

        # copy the best model checkpoint file
        self._clean_up_excess_files(self.best_model_dir)
        shutil.copy2(checkpoint_file, self.best_model_dir)
        self.log.info(f"Best model from epoch {self.best_epoch} saved to {self.model_dir}")

    # ****************************************************************************************
    def reload_model(self, reload_dir):
        """Loads a saved neural net model from the specified directory.

        Args:
            reload_dir (str): Directory where saved model is located.
            model_dataset (ModelDataset Object): contains the current full dataset

        Side effects:
            Resets the value of model, transformers, and transformers_x
        """
        
        checkpoint_file = os.path.join(reload_dir, f"{self.params.dataset_name}_model_{self.params.model_uuid}.pt")
        if os.path.isfile(checkpoint_file):
            # Rebuild the Sequential net from the saved layer OrderedDict, then load the weights
            # and switch to inference mode.
            checkpoint = torch.load(checkpoint_file)
            self.best_epoch = checkpoint["epoch"]
            self.model = torch.nn.Sequential(checkpoint["model_dict"]).to(self.dev)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.model.eval()
        else:
            raise Exception(f"Checkpoint file doesn't exist in the reload_dir {reload_dir}")
        
        # Restore the transformers from the datastore or filesystem
        self.reload_transformers()


    # ****************************************************************************************
    def get_pred_results(self, subset, epoch_label=None):
        """Returns predicted values and metrics from a training, validation or test subset
        of the current dataset, or the full dataset. subset may be 'train', 'valid', 'test'
        accordingly. epoch_label indicates the training epoch we want results for; currently the
        only option for this is 'best'. Results are returned as a dictionary of parameter, value pairs.

        Args:
            subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])

            epoch_label (str): Label for the training epoch we want results for (choices ['best'])

        Returns:
            dict: A dictionary of parameter/ value pairs of the prediction values and results of the dataset subset

        Raises:
            ValueError: if epoch_label not in ['best']

            ValueError: If subset not in ['train','valid','test','full']
        """
        if subset == 'full':
            return self.get_full_dataset_pred_results(self.data)
        if epoch_label == 'best':
            epoch = self.best_epoch
            model_dir = self.best_model_dir
        else:
            raise ValueError("Unknown epoch_label '%s'" % epoch_label)
        if subset == 'train':
            return self.get_train_valid_pred_results(self.train_perf_data[epoch])
        elif subset == 'valid':
            return self.get_train_valid_pred_results(self.valid_perf_data[epoch])
        elif subset == 'test':
            return self.get_train_valid_pred_results(self.test_perf_data[epoch])
        else:
            raise ValueError("Unknown dataset subset '%s'" % subset)

    # ****************************************************************************************
    def get_perf_data(self, subset, epoch_label=None):
        """Returns predicted values and metrics from a training, validation or test subset
        of the current dataset, or the full dataset. 
subset may be 'train', 'valid', 'test' or 'full',\n epoch_label indicates the training epoch we want results for; currently the\n only option for this is 'best'. Results are returned as a PerfData object of the appropriate class \n for the model's split strategy and prediction type.\n\n Args:\n subset (str): Label for the current subset of the dataset (choices ['train','valid','test','full'])\n\n epoch_label (str): Label for the training epoch we want results for (choices ['best'])\n\n Returns:\n PerfData object: Performance object pulled from the appropriate subset\n\n Raises:\n ValueError: if epoch_label not in ['best']\n\n ValueError: If subset not in ['train','valid','test','full']\n \"\"\"\n\n if subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n if epoch_label == 'best':\n epoch = self.best_epoch\n model_dir = self.best_model_dir\n else:\n raise ValueError(\"Unknown epoch_label '%s'\" % epoch_label)\n if subset == 'train':\n return self.train_perf_data[epoch]\n elif subset == 'valid':\n return self.valid_perf_data[epoch]\n elif subset == 'test':\n #return self.get_test_perf_data(model_dir, self.data)\n return self.test_perf_data[epoch]\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified dataset with current model, as well as standard deviations\n if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): tuple of predictions for compounds and standard deviation estimates, if requested.\n Each element of tuple is a numpy array of shape (ncmpds, ntasks, nclasses), where nclasses = 1 for regression\n models.\n \"\"\"\n pred, std = None, None\n self.log.info(\"Predicting values for current model\")\n\n x_data, y_data = map(\n self._tensorize, (dataset.X, dataset.y)\n )\n 
has_conc = len(y_data.shape) > 1 and y_data.shape[1] > 1 and np.nan_to_num(y_data[:,1]).max() > 0\n data_ki_pos = np.where(np.isnan(y_data[:,1].numpy()))[0] if has_conc else np.where(y_data[:,0].numpy())[0]\n data_bind_pos = np.where(~np.isnan(y_data[:,1].numpy()))[0] if has_conc else np.array([])\n\n data_ds = TensorDataset(x_data, y_data)\n data_dl = DataLoader(data_ds, batch_size=self.params.batch_size * 2, pin_memory=True)\n data_data = self.SubsetData(data_ds, \n data_dl, \n len(data_ki_pos), \n len(data_bind_pos))\n pred = []\n real = []\n for xb, yb in data_dl:\n xb = xb.to(self.dev)\n yb = yb.to(self.dev)\n yp = self.model(xb)\n for i in range(len(yb)):\n real.append(yb.to(\"cpu\").numpy()[i])\n pred.append(yp.detach().to(\"cpu\").numpy()[i])\n real = np.array(real)\n pred = np.array(pred)\n\n if self.params.transformers and self.transformers is not None:\n if has_conc:\n pred = np.concatenate((pred, real[:, [1]]), axis=1)\n pred = self.transformers[0].untransform(pred, isreal=False)\n pred_bind_pos = np.where(~np.isnan(pred[:, 1]))[0]\n pred[pred_bind_pos, 0] = self._predict_binding(pred[pred_bind_pos, 0], pred[pred_bind_pos, 1])\n else:\n pred = self.transformers[0].untransform(pred, isreal=False)\n else:\n if has_conc:\n pred = np.concatenate((pred, real[:, [1]]), axis=1)\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to hybrid models.\n\n Returns:\n model_spec_metdata (dict): A dictionary of the parameter sets for the HybridModelWrapper object.\n Parameters are saved under the key 'hybrid_specific' as a subdictionary.\n \"\"\"\n nn_metadata = dict(\n best_epoch = self.best_epoch,\n max_epochs = self.params.max_epochs,\n batch_size = self.params.batch_size,\n layer_sizes = self.params.layer_sizes,\n dropouts = self.params.dropouts,\n learning_rate = 
self.params.learning_rate,\n )\n model_spec_metadata = dict(hybrid_specific = nn_metadata)\n return model_spec_metadata\n\n # ****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Does not apply to Hybrid model.\n \"\"\"\n if os.path.exists(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n# ****************************************************************************************\nclass DCRFModelWrapper(ModelWrapper):\n \"\"\"Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurization (Featurization object): The featurization object created outside of model_wrapper\n log (log): The logger\n output_dir (str): The parent path of the model directory\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n transformers_x (list): Initialized as an empty list, stores the transformers on the featurizers\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs\n model: The dc.models.sklearn_models.SklearnModel as specified by the params attribute\n\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n best_epoch (int): Set to 0, not applicable to deepchem random forest models\n train_perf_data (PerfData): Contains the predictions and performance of the training dataset\n valid_perf_data (PerfData): Contains the predictions and performance of the validation dataset\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n valid_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes DCRFModelWrapper object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurization): Object managing the featurization of compounds\n ds_client: datastore client.\n \"\"\"\n super().__init__(params, featurizer, ds_client)\n self.best_model_dir = os.path.join(self.output_dir, 'best_model')\n self.model_dir = self.best_model_dir\n os.makedirs(self.best_model_dir, exist_ok=True)\n\n if self.params.prediction_type == 'regression':\n rf_model = RandomForestRegressor(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n else:\n rf_model = RandomForestClassifier(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n\n self.model = dc.models.sklearn_models.SklearnModel(rf_model, model_dir=self.best_model_dir)\n\n # ****************************************************************************************\n def train(self, pipeline):\n \"\"\"Trains a random forest model and saves the trained model.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Returns:\n None\n\n Side effects:\n data (ModelDataset): 
contains the dataset, set in pipeline\n\n best_epoch (int): Set to 0, not applicable to deepchem random forest models\n\n train_perf_data (PerfData): Contains the predictions and performance of the training dataset\n\n valid_perf_data (PerfData): Contains the predictions and performance of the validation dataset\n\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n\n valid_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n \"\"\"\n\n self.data = pipeline.data\n self.best_epoch = None\n self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers,'train')\n self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid')\n self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')\n\n self.log.info(\"Fitting random forest model\")\n\n test_dset = pipeline.data.test_dset\n\n num_folds = len(pipeline.data.train_valid_dsets)\n for k in range(num_folds):\n train_dset, valid_dset = pipeline.data.train_valid_dsets[k]\n self.model.fit(train_dset)\n\n train_pred = self.model.predict(train_dset, [])\n train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids)\n\n valid_pred = self.model.predict(valid_dset, [])\n valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids)\n\n test_pred = self.model.predict(test_dset, [])\n test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n\n # Compute mean and SD of performance metrics across validation sets for all folds\n self.train_perf, self.train_perf_std = self.train_perf_data.compute_perf_metrics()\n self.valid_perf, self.valid_perf_std = 
self.valid_perf_data.compute_perf_metrics()\n self.test_perf, self.test_perf_std = self.test_perf_data.compute_perf_metrics()\n\n # Compute score to be used for ranking model hyperparameter sets\n self.model_choice_score = self.valid_perf_data.model_choice_score(self.params.model_choice_score_type)\n\n if num_folds > 1:\n # For k-fold CV, retrain on the combined training and validation sets\n fit_dataset = self.data.combined_training_data()\n self.model.fit(fit_dataset, restore=False)\n self.model_save()\n # The best model is just the single RF training run.\n self.best_epoch = 0\n\n # ****************************************************************************************\n def reload_model(self, reload_dir):\n \"\"\"Loads a saved random forest model from the specified directory. Also loads any transformers that\n were saved with it.\n\n Args:\n reload_dir (str): Directory where saved model is located.\n\n model_dataset (ModelDataset Object): contains the current full dataset\n\n Side effects:\n Resets the value of model, transformers, and transformers_x\n\n \"\"\"\n if self.params.prediction_type == 'regression':\n rf_model = RandomForestRegressor(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n else:\n rf_model = RandomForestClassifier(n_estimators=self.params.rf_estimators,\n max_features=self.params.rf_max_features,\n max_depth=self.params.rf_max_depth,\n n_jobs=-1)\n\n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n self.model = dc.models.sklearn_models.SklearnModel(rf_model, model_dir=reload_dir)\n self.model.reload()\n\n # ****************************************************************************************\n def get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset: 'train', 
'valid', 'test' or 'full' accordingly.\n\n epoch_label: ignored; this function always returns the results for the current model.\n\n Returns:\n A dictionary of parameter, value pairs, in the format expected by the\n prediction_results element of the ModelMetrics data.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n \n \"\"\"\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data)\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data)\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data)\n elif subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset (str): may be 'train', 'valid', 'test' or 'full'\n epoch_label (not used in random forest, but kept as part of the method structure)\n\n Results:\n PerfData object: Subclass of perfdata object associated with the appropriate subset's split strategy and prediction type.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n \"\"\"\n if subset == 'train':\n return self.train_perf_data\n elif subset == 'valid':\n return self.valid_perf_data\n elif subset == 'test':\n #return self.get_test_perf_data(self.best_model_dir, self.data)\n return self.test_perf_data\n elif subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified 
dataset, as well as uncertainty values if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): numpy arrays containing predictions for compounds and the standard error estimates.\n\n \"\"\"\n pred, std = None, None\n self.log.info(\"Evaluating current model\")\n\n pred = self.model.predict(dataset, self.transformers)\n ncmpds = pred.shape[0]\n pred = pred.reshape((ncmpds,1,-1))\n\n if self.params.uncertainty:\n if self.params.prediction_type == 'regression':\n rf_model = joblib.load(os.path.join(self.best_model_dir, 'model.joblib'))\n ## s.d. from forest\n if self.params.transformers and self.transformers is not None:\n RF_per_tree_pred = [dc.trans.undo_transforms(\n tree.predict(dataset.X), self.transformers) for tree in rf_model.estimators_]\n else:\n RF_per_tree_pred = [tree.predict(dataset.X) for tree in rf_model.estimators_]\n\n # Don't need to \"untransform\" standard deviations here, since they're calculated from\n # the untransformed per-tree predictions.\n std = np.array([np.std(col) for col in zip(*RF_per_tree_pred)]).reshape((ncmpds,1,-1))\n else:\n # We can estimate uncertainty for binary classifiers, but not multiclass (yet)\n nclasses = pred.shape[2]\n if nclasses == 2:\n ntrees = self.params.rf_estimators\n # Use normal approximation to binomial sampling error. 
Later we can do Jeffrey's interval if we\n # want to get fancy.\n std = np.sqrt(pred * (1-pred) / ntrees)\n else:\n self.log.warning(\"Warning: Random forest only supports uncertainties for binary classifiers.\")\n\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to random forest models.\n\n Returns:\n model_spec_metadata (dict): Returns random forest specific metadata as a subdict under the key 'rf_specific'\n\n \"\"\"\n rf_metadata = {\n 'rf_estimators': self.params.rf_estimators,\n 'rf_max_features': self.params.rf_max_features,\n 'rf_max_depth': self.params.rf_max_depth\n }\n model_spec_metadata = dict(rf_specific = rf_metadata)\n return model_spec_metadata\n \n # ****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Does not apply to Random Forest.\n \"\"\"\n return\n\n# ****************************************************************************************\nclass DCxgboostModelWrapper(ModelWrapper):\n \"\"\"Contains methods to load in a dataset, split and featurize the data, fit a model to the train dataset,\n generate predictions for an input dataset, and generate performance metrics for these predictions.\n\n\n Attributes:\n Set in __init__\n params (argparse.Namespace): The argparse.Namespace parameter object that contains all parameter information\n featurization (Featurization object): The featurization object created outside of model_wrapper\n log (log): The logger\n output_dir (str): The parent path of the model directory\n transformers (list): Initialized as an empty list, stores the transformers on the response col\n transformers_x (list): Initialized as an empty list, stores the 
transformers on the featurizers\n model_dir (str): The subdirectory under output_dir that contains the model. Created in setup_model_dirs.\n best_model_dir (str): The subdirectory under output_dir that contains the best model. Created in setup_model_dirs\n model: The dc.models.sklearn_models.SklearnModel as specified by the params attribute\n\n Created in train:\n data (ModelDataset): contains the dataset, set in pipeline\n best_epoch (int): Set to 0, not applicable\n train_perf_data (PerfObjects): Contains the predictions and performance of the training dataset\n valid_perf_data (PerfObjects): Contains the predictions and performance of the validation dataset\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n valid_perfs (dict): A dictionary of predicted values and metrics on the validation dataset\n\n \"\"\"\n\n def __init__(self, params, featurizer, ds_client):\n \"\"\"Initializes RunModel object.\n\n Args:\n params (Namespace object): contains all parameter information.\n\n featurizer (Featurization): Object managing the featurization of compounds\n ds_client: datastore client.\n \"\"\"\n super().__init__(params, featurizer, ds_client)\n self.best_model_dir = os.path.join(self.output_dir, 'best_model')\n self.model_dir = self.best_model_dir\n os.makedirs(self.best_model_dir, exist_ok=True)\n\n if self.params.prediction_type == 'regression':\n xgb_model = xgb.XGBRegressor(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='reg:squarederror',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n missing=None,\n importance_type='gain',\n 
n_jobs=-1,\n gpu_id = 0,\n n_gpus = -1,\n max_bin = 16,\n# tree_method = 'gpu_hist',\n seed=0\n )\n else:\n xgb_model = xgb.XGBClassifier(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='binary:logistic',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n importance_type='gain',\n missing=None,\n gpu_id = 0,\n n_jobs=-1, \n n_gpus = -1,\n max_bin = 16,\n# tree_method = 'gpu_hist',\n seed=0\n )\n self.model = dc.models.GBDTModel(xgb_model, model_dir=self.best_model_dir)\n\n # ****************************************************************************************\n def train(self, pipeline):\n \"\"\"Trains a xgboost model and saves the trained model.\n\n Args:\n pipeline (ModelPipeline): The ModelPipeline instance for this model run.\n\n Returns:\n None\n\n Side effects:\n data (ModelDataset): contains the dataset, set in pipeline\n\n best_epoch (int): Set to 0, not applicable to deepchem xgboost models\n\n train_perf_data (PerfData): Contains the predictions and performance of the training dataset\n\n valid_perf_data (PerfData): Contains the predictions and performance of the validation dataset\n\n train_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n\n valid_perfs (dict): A dictionary of predicted values and metrics on the training dataset\n \"\"\"\n\n self.data = pipeline.data\n self.best_epoch = None\n self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers,'train')\n self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid')\n 
self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')\n\n self.log.info(\"Fitting xgboost model\")\n\n test_dset = pipeline.data.test_dset\n\n num_folds = len(pipeline.data.train_valid_dsets)\n for k in range(num_folds):\n train_dset, valid_dset = pipeline.data.train_valid_dsets[k]\n self.model.fit(train_dset)\n\n train_pred = self.model.predict(train_dset, [])\n train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids)\n\n valid_pred = self.model.predict(valid_dset, [])\n valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids)\n\n test_pred = self.model.predict(test_dset, [])\n test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids)\n self.log.info(\"Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f\" % (\n k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,\n pipeline.metric_type, test_perf))\n\n # Compute mean and SD of performance metrics across validation sets for all folds\n self.train_perf, self.train_perf_std = self.train_perf_data.compute_perf_metrics()\n self.valid_perf, self.valid_perf_std = self.valid_perf_data.compute_perf_metrics()\n self.test_perf, self.test_perf_std = self.test_perf_data.compute_perf_metrics()\n\n # Compute score to be used for ranking model hyperparameter sets\n self.model_choice_score = self.valid_perf_data.model_choice_score(self.params.model_choice_score_type)\n\n if num_folds > 1:\n # For k-fold CV, retrain on the combined training and validation sets\n fit_dataset = self.data.combined_training_data()\n self.model.fit(fit_dataset, restore=False)\n self.model_save()\n # The best model is just the single xgb training run.\n self.best_epoch = 0\n\n # ****************************************************************************************\n def reload_model(self, reload_dir):\n\n \"\"\"Loads a saved xgboost model from the specified directory. 
Also loads any transformers that\n were saved with it.\n\n Args:\n reload_dir (str): Directory where saved model is located.\n\n model_dataset (ModelDataset Object): contains the current full dataset\n\n Side effects:\n Resets the value of model, transformers, and transformers_x\n\n \"\"\"\n\n if self.params.prediction_type == 'regression':\n xgb_model = xgb.XGBRegressor(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='reg:squarederror',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n missing=None,\n importance_type='gain',\n n_jobs=-1,\n gpu_id = 0,\n n_gpus = -1,\n max_bin = 16,\n seed=0\n# tree_method = 'gpu_hist'\n )\n else:\n xgb_model = xgb.XGBClassifier(max_depth=self.params.xgb_max_depth,\n learning_rate=self.params.xgb_learning_rate,\n n_estimators=self.params.xgb_n_estimators,\n silent=True,\n objective='binary:logistic',\n booster='gbtree',\n gamma=self.params.xgb_gamma,\n min_child_weight=self.params.xgb_min_child_weight,\n max_delta_step=0,\n subsample=self.params.xgb_subsample,\n colsample_bytree=self.params.xgb_colsample_bytree,\n colsample_bylevel=1,\n reg_alpha=0,\n reg_lambda=1,\n scale_pos_weight=1,\n base_score=0.5,\n random_state=0,\n importance_type='gain',\n missing=None,\n gpu_id = 0,\n n_jobs=-1, \n n_gpus = -1,\n max_bin = 16,\n seed=0\n# tree_method = 'gpu_hist',\n )\n\n # Restore the transformers from the datastore or filesystem\n self.reload_transformers()\n\n self.model = dc.models.GBDTModel(xgb_model, model_dir=self.best_model_dir)\n self.model.reload()\n\n # ****************************************************************************************\n def 
get_pred_results(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset: 'train', 'valid', 'test' or 'full' accordingly.\n\n epoch_label: ignored; this function always returns the results for the current model.\n\n Returns:\n A dictionary of parameter, value pairs, in the format expected by the\n prediction_results element of the ModelMetrics data.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n\n \"\"\"\n if subset == 'train':\n return self.get_train_valid_pred_results(self.train_perf_data)\n elif subset == 'valid':\n return self.get_train_valid_pred_results(self.valid_perf_data)\n elif subset == 'test':\n return self.get_train_valid_pred_results(self.test_perf_data)\n elif subset == 'full':\n return self.get_full_dataset_pred_results(self.data)\n else:\n raise ValueError(\"Unknown dataset subset '%s'\" % subset)\n\n # ****************************************************************************************\n def get_perf_data(self, subset, epoch_label=None):\n \"\"\"Returns predicted values and metrics from a training, validation or test subset\n of the current dataset, or the full dataset.\n\n Args:\n subset (str): may be 'train', 'valid', 'test' or 'full'\n\n epoch_label (not used in random forest, but kept as part of the method structure)\n\n Results:\n PerfData object: Subclass of perfdata object associated with the appropriate subset's split strategy and prediction type.\n\n Raises:\n ValueError: if subset not in ['train','valid','test','full']\n \"\"\"\n\n if subset == 'train':\n return self.train_perf_data\n elif subset == 'valid':\n return self.valid_perf_data\n elif subset == 'test':\n #return self.get_test_perf_data(self.best_model_dir, self.data)\n return self.test_perf_data\n elif subset == 'full':\n return self.get_full_dataset_perf_data(self.data)\n else:\n raise ValueError(\"Unknown dataset 
subset '%s'\" % subset)\n\n # ****************************************************************************************\n def generate_predictions(self, dataset):\n \"\"\"Generates predictions for specified dataset, as well as uncertainty values if params.uncertainty=True\n\n Args:\n dataset: the deepchem DiskDataset to generate predictions for\n\n Returns:\n (pred, std): numpy arrays containing predictions for compounds and the standard error estimates.\n\n \"\"\"\n pred, std = None, None\n self.log.warning(\"Evaluating current model\")\n\n pred = self.model.predict(dataset, self.transformers)\n ncmpds = pred.shape[0]\n pred = pred.reshape((ncmpds, 1, -1))\n self.log.warning(\"uncertainty not supported by xgboost models\")\n\n return pred, std\n\n # ****************************************************************************************\n def get_model_specific_metadata(self):\n \"\"\"Returns a dictionary of parameter settings for this ModelWrapper object that are specific\n to xgboost models.\n\n Returns:\n model_spec_metadata (dict): Returns xgboost specific metadata as a subdict under the key 'xgb_specific'\n\n \"\"\"\n xgb_metadata = {\"xgb_max_depth\" : self.params.xgb_max_depth,\n \"xgb_learning_rate\" : self.params.xgb_learning_rate,\n \"xgb_n_estimators\" : self.params.xgb_n_estimators,\n \"xgb_gamma\" : self.params.xgb_gamma,\n \"xgb_min_child_weight\" : self.params.xgb_min_child_weight,\n \"xgb_subsample\" : self.params.xgb_subsample,\n \"xgb_colsample_bytree\" :self.params.xgb_colsample_bytree\n }\n model_spec_metadata = dict(xgb_specific=xgb_metadata)\n return model_spec_metadata\n\n # ****************************************************************************************\n def _clean_up_excess_files(self, dest_dir):\n \"\"\"\n Function to clean up extra model files left behind in the training process.\n Does not apply to xgboost\n \"\"\"\n return\n"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"numpy.sqrt",
"torch.load",
"torch.sum",
"torch.utils.data.DataLoader",
"numpy.nan_to_num",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.save",
"tensorflow.Graph",
"torch.nn.Dropout",
"sklearn.ensemble.RandomForestClassifier",
"torch.utils.data.TensorDataset",
"torch.tensor",
"numpy.std",
"numpy.zeros",
"torch.nn.Sequential",
"tensorflow.executing_eagerly",
"numpy.isnan",
"torch.nn.Linear",
"torch.log",
"numpy.array",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.latest_checkpoint",
"tensorflow.compat.v1.Session",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tblaschke/reinvent-multi-target | [
"a555431da0de038b7e643c508a7d2501e83c087f"
] | [
"reinvent/unittest_reinvent/scoring_tests/physchem/test_hbd_lipinski.py"
] | [
"import unittest\n\nimport numpy as np\nimport numpy.testing as npt\n\nfrom scoring.component_parameters import ComponentParameters\nfrom scoring.function import CustomSum\nfrom utils.enums.component_specific_parameters_enum import ComponentSpecificParametersEnum\nfrom utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum\nfrom utils.enums.transformation_type_enum import TransformationTypeEnum\n\n\nclass Test_tpsa_score_no_transformation(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n sf_enum = ScoringFunctionComponentNameEnum()\n csp_enum = ComponentSpecificParametersEnum()\n ts_parameters = ComponentParameters(component_type=sf_enum.NUM_HBD_LIPINSKI,\n name=\"NumHBD_Lipinski\",\n weight=1.,\n smiles=[],\n model_path=\"\",\n specific_parameters={\n csp_enum.TRANSFORMATION: False\n })\n self.sf_state = CustomSum(parameters=[ts_parameters])\n\n def test_hbd_1(self):\n smiles = [\n \"C(=O)N\",\n 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N'\n ]\n values = np.array([1., 1.])\n score = self.sf_state.get_final_score(smiles=smiles)\n npt.assert_array_equal(score.total_score, values)\n\n\nclass Test_tpsa_score_with_double_sigmoid(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n sf_enum = ScoringFunctionComponentNameEnum()\n csp_enum = ComponentSpecificParametersEnum()\n tt_enum = TransformationTypeEnum()\n specific_parameters = {\n csp_enum.TRANSFORMATION: True,\n csp_enum.LOW: 0,\n csp_enum.HIGH: 1,\n csp_enum.TRANSFORMATION_TYPE: tt_enum.STEP\n }\n ts_parameters = ComponentParameters(component_type=sf_enum.NUM_HBD_LIPINSKI,\n name=\"NumHBD_Lipinski\",\n weight=1.,\n smiles=[],\n model_path=\"\",\n specific_parameters=specific_parameters\n )\n self.sf_state = CustomSum(parameters=[ts_parameters])\n\n def test_hbd_1(self):\n smiles = [\n \"C(=O)N\",\n 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N'\n ]\n values = np.array([1.0, 1.0])\n score = self.sf_state.get_final_score(smiles=smiles)\n 
npt.assert_array_equal(score.total_score, values)\n\n\n\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yongleex/SBCC | [
"40f8e67e446fc14fc82ea87f82ee841d62520c71"
] | [
"utils/outlier.py"
] | [
"import numpy as np\nimport cv2\n\n\ndef NMT(u,v, eps=0.2, thr=5.0, smooth_flag=True):\n \"\"\" \n Normalised Median Test, from 'Universal outlier detection for PIV data'\n \"\"\"\n u, v = np.float32(u), np.float32(v)\n criterion = 0\n \n for c in [u,v]:\n c_median = cv2.medianBlur(c, 5)\n residual = np.abs(c - c_median)\n r_median = cv2.medianBlur(residual, 5)\n cri = residual/(r_median + eps)\n criterion += np.power(cri, 2)\n\n criterion = np.sqrt(criterion)\n index = criterion > thr\n\n u_out, v_out = u, v\n u_out[index] = cv2.medianBlur(u, 5)[index]\n v_out[index] = cv2.medianBlur(v, 5)[index]\n \n if smooth_flag:\n u_out = cv2.GaussianBlur(u_out, (3,3),0)\n v_out = cv2.GaussianBlur(v_out, (3,3),0)\n return u_out, v_out, index\n\n\n\n"
] | [
[
"numpy.abs",
"numpy.sqrt",
"numpy.float32",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eglrp/ConvPoint_Keras | [
"66c94479ff8dc8ad174ed4da8e6bb1d641a8a8c0"
] | [
"ConvPoint.py"
] | [
"from dataTool import ReadLabels, ReadXYZ, VisualizePointCloudClassesAsync, modelPath, DataTool\nfrom imports import *\nimport math\nimport numpy as np\nfrom time import time \n\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import Sequence\nfrom tensorflow.keras.layers import Input, BatchNormalization, Dense, Dropout, InputLayer\n\nfrom sklearn.neighbors import KDTree\nfrom sklearn.metrics import confusion_matrix\nfrom PIL import Image, ImageEnhance, ImageOps\n\nimport random\n# from notify_run import Notify\n\nclass Const:\n @staticmethod\n def IsWindowsMachine():\n if os.path.isdir(\"C:/Program Files\"):\n return True\n else: \n return False\n \n if os.path.isdir(\"C:/Program Files\"):\n batchSize = 8\n else:\n batchSize = 16 #25\n\n #Placeholders\n classCount = Label.Semantic3D.Count-1\n classNames = Label.Semantic3D.Names\n\n testFiles = []\n excludeFiles = []\n Paths = Paths.Semantic3D\n \n epochs = 100\n pointComponents = 3\n featureComponents = 3 #rgb \n classCount = 0\n npoints = 8192\n blocksize = 8\n test_step = 0.5\n name = \"\"\n\n #Algorithm configuration\n noFeature = False\n Fusion = False\n Scale = False\n Rotate = False\n Mirror = False\n Jitter = False\n FtrAugment = False\n\n logsPath = \"./logs\"\n ### MODEL CONFIG\n pl = 64\n ### MODEL CONFIG\n\n def BuildSpecDict(self):\n return {\"noFeature\" : self.noFeature,\n \"Fusion\" : self.Fusion,\n \"Scale\" : self.Scale,\n \"Rotate\" : self.Rotate,\n \"Mirror\" : self.Mirror,\n \"Jitter\" : self.Jitter,\n \"FtrAugment\" : False if self.noFeature else self.FtrAugment,\n }\n\n def Name(self, UID = \"\"):\n modelName = self.name\n \n modelName += f\"({len(self.TrainFiles())}&{len(self.TestFiles())})\"\n\n for spec, value in self.BuildSpecDict().items():\n if(value == True):\n modelName += f\"({spec})\"\n\n if(UID != \"\"):\n modelName += f\"_{UID}\"\n\n return modelName\n \n @staticmethod\n def RemoveUID(name : str):\n return 
name.replace(f\"_{Const.ParseModelUID(name)}\", \"\")\n \n @staticmethod\n def UID():\n import uuid\n return uuid.uuid4().hex\n \n @staticmethod\n def ParseModelConfig(file):\n config = Paths.FileName(file).split(\"_\")[0].replace(\"(\",\" \").replace(\")\",\"\").replace(\"vox \",\"\").split(\" \")\n\n const = None\n if(config[0] == NPM3D.name):\n const = NPM3D() \n if(config[0] == Semantic3D.name):\n const = Semantic3D()\n \n for conf in config[1:]:\n if conf == \"noFeature\" or conf == \"NOCOL\":\n const.noFeature = True\n elif conf == \"Fusion\":\n const.Fusion = True\n elif conf == \"Scale\":\n const.Scale = True\n elif conf == \"Rotate\":\n const.Rotate = True\n elif conf == \"Mirror\":\n const.Mirror = True\n elif conf == \"Jitter\":\n const.Jitter = True\n elif conf == \"FtrAugment\":\n const.FtrAugment = True\n \n return const\n \n @staticmethod\n def ParseModelUID(file):\n parts = Paths.FileName(file).split(\"_\")\n\n if(len(parts) >= 2):\n return parts[1]\n else:\n return None\n\n @staticmethod\n def ParseModelName(file, withUID = True):\n parts = Paths.FileName(file, withoutExt = False).split(\"_\")\n\n name = parts[0]\n if(withUID and len(parts) > 1):\n name += \"_\"+parts[1]\n\n return name\n\n def TestFiles(self): \n return Paths.JoinPaths(self.Paths.processedTrain, self.testFiles)\n\n def TrainFiles(self):\n return Paths.GetFiles(self.Paths.processedTrain, excludeFiles = self.TestFiles()+self.excludeFiles)\n\nclass Semantic3D(Const): \n pointComponents = 3\n featureComponents = 3 #rgb\n classCount = Label.Semantic3D.Count-1\n classNames = Label.Semantic3D.Names\n test_step = 0.8\n name = \"Sem3D\"\n Paths = Paths.Semantic3D\n\n testFiles = [\n \"untermaederbrunnen_station3_xyz_intensity_rgb_voxels.npy\",\n \"domfountain_station1_xyz_intensity_rgb_voxels.npy\",\n ]\n\n excludeFiles = []\n\n fileNames = {\"birdfountain_station1_xyz_intensity_rgb\" : \"birdfountain1\",\n \"castleblatten_station1_intensity_rgb\" : \"castleblatten1\",\n 
\"castleblatten_station5_xyz_intensity_rgb\" : \"castleblatten5\",\n \"marketplacefeldkirch_station1_intensity_rgb\" : \"marketsquarefeldkirch1\",\n \"marketplacefeldkirch_station4_intensity_rgb\" : \"marketsquarefeldkirch4\",\n \"marketplacefeldkirch_station7_intensity_rgb\" : \"marketsquarefeldkirch7\",\n \"sg27_station3_intensity_rgb\" : \"sg27_3\",\n \"sg27_station6_intensity_rgb\" : \"sg27_6\",\n \"sg27_station8_intensity_rgb\" : \"sg27_8\",\n \"sg27_station10_intensity_rgb\" : \"sg27_10\",\n \"sg28_station2_intensity_rgb\" : \"sg28_2\",\n \"sg28_station5_xyz_intensity_rgb\" : \"sg28_5\",\n \"stgallencathedral_station1_intensity_rgb\" : \"stgallencathedral1\",\n \"stgallencathedral_station3_intensity_rgb\" : \"stgallencathedral3\",\n \"stgallencathedral_station6_intensity_rgb\" : \"stgallencathedral6\",\n\n \"MarketplaceFeldkirch_Station4_rgb_intensity-reduced\" : \"marketsquarefeldkirch4-reduced\",\n \"sg27_station10_rgb_intensity-reduced\" : \"sg27_10-reduced\",\n \"sg28_Station2_rgb_intensity-reduced\" : \"sg28_2-reduced\",\n \"StGallenCathedral_station6_rgb_intensity-reduced\" : \"stgallencathedral6-reduced\",\n }\n\nclass Curbs(Const): \n pointComponents = 3\n featureComponents = 3\n classCount = 2\n classNames = Label.Curbs.Names\n test_step = 0.5\n name = \"Curbs\"\n Paths = Paths.Curbs\n\n if os.path.isdir(\"C:/Program Files\"):\n batchSize = 8\n else:\n batchSize = 25\n\n testFiles = [\n \"park_extracted.npy\",\n \"Jelskio_str_trimmed.npy\",\n ]\n \n excludeFiles = [\n \"powerlines_dataset\"\n ]\n\n def FilterCurbAndLineFiles(self, files):\n return [file for file in files if not file.endswith(\"_curbs.npy\") and not file.endswith(\"_lines.npy\")]\n\n def TestFiles(self): \n return self.FilterCurbAndLineFiles(super(Curbs, self).TestFiles())\n\n def TrainFiles(self):\n return self.FilterCurbAndLineFiles(super(Curbs, self).TrainFiles())\n\nclass NPM3D(Const):\n pointComponents = 3\n featureComponents = 1\n classCount = Label.NPM3D.Count-1\n classNames = 
Label.NPM3D.Names\n test_step = 0.5\n name = \"NPM3D\"\n Paths = Paths.NPM3D \n\n testFiles = [\n # \"Lille1_1_0.npy\",\n # \"Lille1_1_1.npy\",\n # \"Lille1_1_2.npy\",\n # \"Lille1_1_3.npy\",\n # \"Lille1_1_4.npy\",\n # \"Lille1_1_5.npy\",\n # \"Lille1_1_6.npy\",\n # \"Lille1_1_7.npy\",\n # \"Lille1_1_8.npy\",\n\n # \"Lille1_2_0.npy\",\n # \"Lille1_2_1.npy\",\n \n \"Lille2_0.npy\",\n \"Lille2_1.npy\",\n \"Lille2_2.npy\", \n \"Lille2_8.npy\", \n \"Lille2_9.npy\", \n\n # \"Paris_0.npy\",\n # \"Paris_1.npy\",\n ]\n \n excludeFiles = [\n # \"Lille1_1_7.npy\",\n # \"Lille1_2_2.npy\",\n \"Lille2_10.npy\",\n # \"Paris_2.npy\",\n ]\n\nclass WeightsMul(tf.keras.layers.Layer):\n def __init__(self, shape, lowBound, highBound, **kwargs):\n super(WeightsMul, self).__init__(**kwargs)\n self.shape = shape\n self.lowBound = lowBound\n self.highBound = highBound\n\n def build(self, input_shape):\n init = tf.random_uniform_initializer(self.lowBound, self.highBound)\n self.vars = self.add_weight(shape=(self.shape), \n initializer = init, \n trainable = True, dtype=tf.float32)\n\n def call(self, inputs): \n return tf.matmul(inputs, self.vars)\n \n def get_config(self):\n config = super(WeightsMul, self).get_config()\n config.update({'shape': self.shape, 'lowBound': self.lowBound, 'highBound': self.highBound})\n return config\n\nclass GatherNDLayer(tf.keras.layers.Layer):\n def __init__(self, **kwargs): \n super(GatherNDLayer, self).__init__(**kwargs)\n \n def call(self, array, indices):\n return tf.gather_nd(array, indices, batch_dims=1)\n \n def get_config(self):\n config = super(GatherNDLayer, self).get_config()\n return config\n\nclass SubstractCenters(tf.keras.layers.Layer):\n def __init__(self, dim, n_centers, **kwargs):\n super(SubstractCenters, self).__init__(**kwargs)\n self.dim = dim\n self.n_centers = n_centers\n \n def build(self, input_shape):\n center_data = np.zeros((self.dim, self.n_centers))\n for i in range(self.n_centers):\n coord = np.random.rand(self.dim)*2 - 1\n 
while (coord**2).sum() > 1:\n coord = np.random.rand(self.dim)*2 - 1\n center_data[:,i] = coord\n\n self.centers = self.add_weight(shape = (center_data.shape), \n initializer = tf.constant_initializer(center_data), \n trainable = True, dtype=tf.float32)\n\n def call(self, points): \n return points - self.centers\n \n def get_config(self):\n config = super(SubstractCenters, self).get_config()\n config.update({'dim': self.dim, 'n_centers': self.n_centers})\n return config\n\nclass UnitBallNormalize(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(UnitBallNormalize, self).__init__(**kwargs)\n\n def call(self, points):\n maxi = tf.sqrt(tf.reduce_max(tf.reduce_sum(tf.square(tf.stop_gradient(points)), axis = 3), axis = 2))\n maxi = tf.where(tf.equal(maxi, 0.0), tf.constant(1.0), maxi)\n points = points / tf.expand_dims(tf.expand_dims(maxi, 2), 3)\n return points\n \n def get_config(self):\n config = super(UnitBallNormalize, self).get_config()\n return config\n\ndef PtConv(fts, points, K, next_pts, in_features, out_features, n_centers = 16):\n next_pts_ = None\n if isinstance(next_pts, int) and points.get_shape()[1] != next_pts:\n # convolution with reduction\n indices, next_pts_ = KDTreeSampleLayer(K, next_pts)(points)\n elif (next_pts is None) or (isinstance(next_pts, int) and points.get_shape()[1] == next_pts):\n # convolution without reduction\n indices = KDTreeLayer(K)(points, points)\n next_pts_ = points\n else:\n # convolution with up sampling or projection on given points\n indices = KDTreeLayer(K)(points, next_pts)\n next_pts_ = next_pts\n \n if next_pts is None or isinstance(next_pts, int):\n next_pts = next_pts_\n\n # get the features and point cooridnates associated with the indices\n pts = GatherNDLayer()(points, indices)\n if fts is None:\n features = tf.expand_dims(tf.ones_like(pts[:,:,:,0]), 3)\n else:\n features = GatherNDLayer()(fts, indices) \n\n # center the neighborhoods\n pts = pts - tf.expand_dims(next_pts,2)\n\n # normalize to unit 
ball, or not\n pts = UnitBallNormalize()(pts)\n\n # compute the distances\n dists = SubstractCenters(3, n_centers)(tf.expand_dims(pts, 4))\n\n dShape = dists.shape\n dists = tf.reshape(dists, (-1, dShape[1], dShape[2], dShape[3]*dShape[4]))\n\n dists = DenseInitialized(2*n_centers, activation=\"relu\")(dists)\n dists = DenseInitialized(n_centers, activation=\"relu\")(dists)\n dists = DenseInitialized(n_centers, activation=\"relu\")(dists)\n \n # compute features \n fs = features.shape # [batch, points, n_centers, in_features]\n ds = dists.shape\n\n features = tf.transpose(features,[0, 1, 3, 2])\n features = tf.reshape(features, (-1, features.shape[2], features.shape[3])) #features.shape[0]*features.shape[1]\n dists = tf.reshape(dists, (-1, dists.shape[2], dists.shape[3])) #dists.shape[0]*dists.shape[1]\n\n features = tf.matmul(features, dists)\n features = tf.reshape(features, (-1, ds[1], features.shape[1]*features.shape[2]))\n\n bound = math.sqrt(3.0) * math.sqrt(2.0 / (in_features + out_features))\n features = WeightsMul([in_features * n_centers, out_features], -bound, bound)(features)\n\n features = features / fs[2]\n\n # normalization and activation\n features = BatchNormalization(epsilon = 1e-05, momentum=0.9)(features) \n\n features = tf.nn.relu(features)\n\n return features, next_pts\n\ndef LinearInitializer(k):\n k = np.sqrt(1.0/float(k))\n return tf.random_uniform_initializer(k*-1, k)\n\ndef DenseInitialized(out_features, activation = None, name = None):\n def DenseInit(x):\n return Dense(out_features, \n kernel_initializer = tf.initializers.lecun_normal(),\n bias_initializer = tf.initializers.lecun_normal(),\n activation = activation,\n name = name,\n )(x)\n\n return DenseInit\n\ndef CreateModel(classCount, ftsComp, in_fts = None, in_pts = None, returnFeatures = False, noColor = False, applySoftmax = True):\n print(\"Creating new model...\")\n \n if(in_fts is None and in_pts is None):\n in_pts = Input(shape=(Const.npoints, Const.pointComponents), 
dtype=tf.float32) #points \n\n if(noColor):\n in_fts = None\n else:\n in_fts = Input(shape=(Const.npoints, ftsComp), dtype=tf.float32) #featuress \n \n if(noColor):\n in_fts = None\n\n pl = Const.pl\n ### Down Sample\n x0, _ = PtConv(in_fts, in_pts, K = 16, next_pts = None, in_features = ftsComp, out_features = pl)\n x1, pts1 = PtConv(x0, in_pts, K = 16, next_pts = 2048, in_features = pl, out_features = pl)\n x2, pts2 = PtConv(x1, pts1, K = 16, next_pts = 1024, in_features = pl, out_features = pl)\n x3, pts3 = PtConv(x2, pts2, K = 16, next_pts = 256, in_features = pl, out_features = pl)\n x4, pts4 = PtConv(x3, pts3, K = 8, next_pts = 64, in_features = pl, out_features = pl*2)\n x5, pts5 = PtConv(x4, pts4, K = 8, next_pts = 16, in_features = pl*2, out_features = pl*2)\n x6, pts6 = PtConv(x5, pts5, K = 4, next_pts = 8, in_features = pl*2, out_features = pl*2)\n\n ## Up Sample\n x5d, _ = PtConv(x6, pts6, K = 4, next_pts = pts5, in_features = pl*2, out_features = pl*2)\n x5d = tf.concat([x5d, x5], axis = 2)\n\n x4d, _ = PtConv(x5d, pts5, K = 4, next_pts = pts4, in_features = pl*4, out_features = pl*2)\n x4d = tf.concat([x4d, x4], axis = 2)\n\n x3d, _ = PtConv(x4d, pts4, K = 4, next_pts = pts3, in_features = pl*4, out_features = pl)\n x3d = tf.concat([x3d, x3], axis = 2)\n\n x2d, _ = PtConv(x3d, pts3, K = 8, next_pts = pts2, in_features = pl*2, out_features = pl)\n x2d = tf.concat([x2d, x2], axis = 2)\n\n x1d, _ = PtConv(x2d, pts2, K = 8, next_pts = pts1, in_features = pl*2, out_features = pl)\n x1d = tf.concat([x1d, x1], axis = 2)\n\n x0d, _ = PtConv(x1d, pts1, K = 8, next_pts = in_pts, in_features = pl*2, out_features = pl)\n x0d = tf.concat([x0d, x0], axis = 2)\n \n ### Output layer\n out_labels = Dropout(rate=0.5)(x0d)\n \n out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]))\n \n out_labels = DenseInitialized(classCount)(out_labels)\n\n out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))\n\n if(applySoftmax):\n out_labels = 
tf.nn.softmax(out_labels)\n\n if(noColor):\n inputList = [in_pts]\n else:\n inputList = [in_fts, in_pts]\n\n if(returnFeatures):\n return Model(inputList, [x0d, out_labels], name =\"model\")\n \n model = Model(inputList, out_labels, name =\"model\")\n model = CompileModel(model, classCount) \n # print(model.summary())\n return model\n\ndef ModifyModelOutput(model, classCount):\n dropoutLayer = model.layers[len(model.layers)-5] #take output of the drop out layer\n out_labels = dropoutLayer.output\n\n out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]), name = \"lbl_reshape_1\")\n out_labels = DenseInitialized(classCount, name = \"lbl_dense\")(out_labels) \n out_labels = tf.reshape(out_labels, (-1, dropoutLayer.input.shape[1], out_labels.shape[1]), name = \"lbl_reshape_2\")\n out_labels = tf.nn.softmax(out_labels, name = \"lbl_softmax\")\n\n return Model(model.inputs, out_labels, name =\"model\")\n\ndef ReadModel(modelPath):\n if(not modelPath.endswith(\".h5\")):\n modelPath += \".h5\"\n\n if(not os.path.exists(modelPath)):\n if(os.path.exists(os.path.join(\".\" , \"data\", modelPath))):\n modelPath = os.path.join(\".\" , \"data\", modelPath)\n elif(os.path.exists(os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath, False)))):\n file = os.path.basename(modelPath)\n folder = os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath, False))\n modelPath = os.path.join(folder, file)\n elif(os.path.exists(os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath)))):\n file = os.path.basename(modelPath)\n folder = os.path.join(\".\" , \"data\", Const.ParseModelName(modelPath))\n modelPath = os.path.join(folder, file)\n\n if(not os.path.exists(modelPath)):\n raise FileNotFoundError \n\n model = tf.keras.models.load_model(modelPath, compile=False,\n custom_objects={'NearestNeighborsLayer': NearestNeighborsLayer, \n 'SampleNearestNeighborsLayer': SampleNearestNeighborsLayer,\n 'SubstractCenters': SubstractCenters,\n 'WeightsMul': WeightsMul,\n 
'GatherNDLayer':GatherNDLayer,\n 'UnitBallNormalize':UnitBallNormalize,\n 'KDTreeSampleLayer':KDTreeSampleLayer,\n 'KDTreeLayer':KDTreeLayer,\n })\n\n PrintToLog(\"{} model loaded\".format(modelPath))\n return model\n\ndef LatestModel(path):\n if(Const.ParseModelUID(path) is None):\n folders = [os.path.join(\".\" , \"data\",folder) for folder in os.listdir(os.path.join(\".\" , \"data\")) \n if os.path.isdir(os.path.join(\".\" , \"data\",folder)) \n and path == Const.RemoveUID(Const.ParseModelName(folder))\n and len(Paths.GetFiles(os.path.join(\".\" , \"data\",folder), findExtesions=\".h5\")) > 0]\n path = max(folders, key=os.path.getctime)\n else:\n path = os.path.join(\".\" , \"data\", Const.ParseModelName(path)) \n\n try:\n latestModel = max(Paths.GetFiles(path, findExtesions=\".h5\"), key=os.path.getctime)\n except:\n print(f\"No model found in: {path}\")\n latestModel = None\n\n return latestModel\n\nimport re\ndef ModelValMIOU(path):\n result = re.findall(\"val\\((.+)\\)\", path)\n return float(result[0])\n\ndef HighestValMIOUModel(path):\n if(not os.path.isdir(path)):\n path = os.path.join(\".\" , \"data\", os.path.basename(path).split(\"_\")[0])\n\n latestModel = max(Paths.GetFiles(path, findExtesions=\".h5\"), key=ModelValMIOU)\n return latestModel\n\ndef LoadModel(modelPath, consts):\n model = ReadModel(modelPath)\n\n modified = False\n if(model.output.shape[2] != consts.classCount):\n print(\"Model output {} classes changed to {}\".format(model.output.shape[2], consts.classCount))\n modified = True\n model = ModifyModelOutput(model, consts.classCount)\n\n model = CompileModel(model, consts.classCount)\n # model.summary()\n return model, modified\n\ndef ReadModelConfig(path):\n Model = ReadModel(path)\n modelConfig = Const.ParseModelConfig(path)\n return Model, modelConfig\n\ndef CreateModelCopy(Model, modelConfig, in_pts, in_RGB):\n inputFeatures = 1 if modelConfig.noFeature else modelConfig.featureComponents\n newModel = 
CreateModel(modelConfig.classCount, inputFeatures, in_RGB, in_pts, noColor=modelConfig.noFeature, returnFeatures=True, applySoftmax=False)\n\n if(Model != None):\n for new_layer, layer in zip(newModel.layers, Model.layers):\n new_layer.set_weights(layer.get_weights())\n\n return newModel\n\ndef FuseModels(modelPaths, consts):\n fusionModel = None\n\n assert(len(modelPaths) == 2 or modelPaths is None)\n print(\"Model fusion\")\n \n if(not modelPaths is None):\n ModelA, modelAConfig = ReadModelConfig(modelPaths[0])\n ModelB, modelBConfig = ReadModelConfig(modelPaths[1])\n else:\n consts.noFeature = False\n modelAConfig = consts\n consts.noFeature = True\n modelBConfig = consts\n\n in_RGB = None\n if(not modelAConfig.noFeature or not modelBConfig.noFeature):\n in_RGB = Input(shape=(Const.npoints, consts.featureComponents), dtype=tf.float32, name = \"In_RGB\") #features\n in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32, name = \"In_pts\") #points\n\n newModelA = CreateModelCopy(ModelA, modelAConfig, in_pts, in_RGB)\n newModelB = CreateModelCopy(ModelB, modelBConfig, in_pts, in_RGB)\n\n x = tf.concat((newModelA.output[0], newModelB.output[0]), axis = 2) #fuse features from both models\n\n x1, _ = PtConv(x, in_pts, K = 16, next_pts = Const.npoints, in_features = 2*128, out_features = 96)\n x2, _ = PtConv(x1, in_pts, K = 16, next_pts = Const.npoints, in_features = 96, out_features = 48)\n x0d = tf.concat([x2, newModelA.output[1], newModelB.output[1]], axis = 2)\n\n out_labels = tf.reshape(x0d, (-1, x0d.shape[2]))\n out_labels = Dropout(rate=0.5)(out_labels)\n out_labels = DenseInitialized(consts.classCount)(out_labels)\n out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))\n\n out_labels = tf.nn.softmax(out_labels)\n\n fusionModel = Model([in_pts] if in_RGB is None else [in_RGB, in_pts], out_labels, name =\"model\")\n\n nontrainableNames = [x.name for x in newModelA.layers] + [x.name for x in newModelB.layers]\n # 
nontrainableNames = [x.name for x in newModelA.layers]\n count = 0\n for i, layer in enumerate(fusionModel.layers):\n if(layer.name in nontrainableNames):\n layer.trainable = False\n count += 1\n\n PrintToLog(f\"{len(fusionModel.layers)-count}/{len(fusionModel.layers)} layers are trainable.\")\n\n fusionModel = CompileModel(fusionModel, consts.classCount)\n # fusionModel.summary()\n return fusionModel\n\nclass MIOU(tf.keras.metrics.Metric):\n \n def __init__(self, classCount, name='miou', **kwargs):\n super(MIOU, self).__init__(name=name, **kwargs)\n self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)\n self.classCount = classCount\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)\n PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)\n confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount) \n self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))\n\n def result(self):\n union = tf.linalg.diag_part(self.cm)\n rowSum = tf.math.reduce_sum(self.cm, axis = 0)\n colSum = tf.math.reduce_sum(self.cm, axis = 1)\n intersection = (colSum + rowSum - union)\n intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)\n iou = union / intersection\n miou = tf.expand_dims(tf.convert_to_tensor(tf.reduce_sum(iou) / tf.cast(iou.shape[0], dtype=np.float64)), 0)\n return tf.concat((tf.expand_dims(miou,1), tf.cast(tf.expand_dims(iou,1), tf.float64)), 0)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))\n\ndef moving_miou_metric(classCount):\n def moving_iou(y_true, y_pred):\n TrueLbl = tf.argmax(tf.reshape(y_true, [-1, classCount]), axis= 1)\n PredLbl = tf.argmax(tf.reshape(y_pred, [-1, classCount]), axis= 
1)\n\n cm = tf.math.confusion_matrix(TrueLbl, PredLbl, classCount)\n\n union = tf.linalg.diag_part(cm)\n\n rowSum = tf.math.reduce_sum(cm, axis = 0)\n colSum = tf.math.reduce_sum(cm, axis = 1)\n\n intersection = (colSum + rowSum - union)+1\n\n iou = union / intersection\n\n return tf.reduce_sum(iou) / tf.cast(tf.math.maximum(iou.shape[0], 1), dtype=np.float64)\n\n return moving_iou\n\nclass IOU(tf.keras.metrics.Metric):\n def __init__(self, classCount, classIndex, name='iou', **kwargs):\n super(IOU, self).__init__(name=name, **kwargs)\n self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)\n self.classCount = classCount\n self.classIndex = classIndex\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)\n PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)\n confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)\n self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))\n\n def result(self):\n union = tf.linalg.diag_part(self.cm)\n rowSum = tf.math.reduce_sum(self.cm, axis = 0)\n colSum = tf.math.reduce_sum(self.cm, axis = 1)\n intersection = (colSum + rowSum - union)\n intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)\n iou = union / intersection\n return tf.cast(tf.expand_dims(iou, 1)[self.classIndex], tf.float64)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))\n\ndef weighted_categorical_crossentropy(weights):\n # weights = [0.9,0.05,0.04,0.01]\n def wcce(y_true, y_pred):\n Kweights = tf.constant(weights)\n y_true = tf.cast(y_true, y_pred.dtype)\n return tf.keras.losses.categorical_crossentropy(y_true, y_pred) * tf.math.reduce_sum(y_true * Kweights, axis=-1)\n\n return wcce\n\ndef 
CompileModel(model, classCount):\n model.compile(\n optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, epsilon = 1e-8),\n loss = tf.keras.losses.CategoricalCrossentropy(),\n # loss = weighted_categorical_crossentropy([0.7, 5]),\n metrics= [IOU(classCount, 0, name=\"other\"), IOU(classCount, 1, name=\"curb\")] if classCount == 2 else [MIOU(classCount)]\n )\n return model\n\nclass IOUPerClass(tf.keras.callbacks.Callback):\n def __init__(self, plot_path, classNames, firstEpoch = 0, metric = \"miou\"):\n self.metric = metric\n self.epoch = firstEpoch \n self.classCount = len(classNames)\n self.classNames = classNames\n self.path = plot_path\n\n print(f\"IOU logs path: {self.path}\")\n\n self.writers = []\n self.val_writers = []\n ioupath = os.path.join(plot_path, \"iou\")\n os.makedirs(ioupath, exist_ok=True)\n for i in range(self.classCount):\n path = os.path.join(ioupath, classNames[i])\n os.makedirs(path, exist_ok=True)\n self.writers.append(tf.summary.create_file_writer(path))\n\n path = os.path.join(ioupath, \"val_\"+classNames[i])\n os.makedirs(path, exist_ok=True)\n self.val_writers.append(tf.summary.create_file_writer(path))\n # print(\"Writer path: \", path)\n \n self.InitializeMIOUWriter() \n\n def InitializeMIOUWriter(self):\n mioupath = os.path.join(self.path, \"miou\")\n os.makedirs(mioupath, exist_ok=True)\n\n path = os.path.join(mioupath, \"miou\")\n os.makedirs(path, exist_ok=True)\n self.miou_writer = tf.summary.create_file_writer(path)\n\n path = os.path.join(mioupath, \"val_miou\")\n os.makedirs(path, exist_ok=True)\n self.val_miou_writer = tf.summary.create_file_writer(path)\n \n def WriteLog(self, writer, metric, logs, epoch, tag = \"miou\"):\n value = logs.get(metric)\n if(value is None):\n print(f\"Failed getting {metric} log\")\n return False\n \n with writer.as_default():\n tf.summary.scalar(tag, value[0][0], step=epoch)\n writer.flush()\n\n def WriteLogs(self, writers, metric, logs, epoch, tag = \"iou\"):\n metrix = logs.get(metric)\n 
if(metrix is None):\n print(f\"Failed getting {metric} log\")\n return False\n\n iou = [i[0] for i in metrix[len(metrix)-self.classCount:]]\n for i in range(len(iou)):\n with writers[i].as_default():\n tf.summary.scalar(tag, iou[i], step=epoch)\n writers[i].flush()\n \n def on_epoch_end(self, batch, logs=None):\n self.WriteLogs(self.writers, self.metric, logs, self.epoch)\n self.WriteLogs(self.val_writers, \"val_\"+self.metric, logs, self.epoch)\n\n self.WriteLog(self.miou_writer, self.metric, logs, self.epoch)\n self.WriteLog(self.val_miou_writer, \"val_\"+self.metric, logs, self.epoch)\n self.epoch += 1\n\nlogSaveDir = \"\"\ndef WriteToLog(msg):\n if(os.path.isdir(logSaveDir)):\n logFile = open(logSaveDir+f\"/training.log\", \"a\")\n logFile.write(msg+\"\\n\")\n logFile.close()\n\ndef PrintToLog(msg):\n print(msg)\n WriteToLog(msg)\n\nclass ModelSaveCallback(tf.keras.callbacks.Callback):\n def __init__(self, saveDir, trainingSteps, metric = \"accuracy\", modelNamePrefix = \"\", sendNotifications = False):\n super().__init__()\n self.saveDir = saveDir\n self.metric = metric\n self.modelNamePrefix = modelNamePrefix\n\n self.epoch = 0\n self.trainingSteps = trainingSteps\n \n self.sendNotifications = sendNotifications\n if(self.sendNotifications):\n self.notifyDevice = Notify()\n \n os.makedirs(self.saveDir, exist_ok=True)\n WriteToLog(f\"Training: {modelNamePrefix}\")\n\n def on_epoch_end(self, epoch, logs=None):\n self.epoch = epoch + 1\n if(len(logs) > 0):\n miou = logs.get(self.metric)[0]*100\n val_metric = \"val_\"+self.metric\n val_miou = logs.get(val_metric)[0]*100\n SaveModel(self.saveDir, epoch, self.model, miou, val_miou, self.modelNamePrefix)\n\n message = \"Ep: {0}. {1}: {2:.3}%. {3}: {4:.3}%\".format(self.epoch, self.metric, miou, val_metric, val_miou)\n WriteToLog(message)\n\n f = open(\"demofile3.txt\", \"w\")\n f.write(\"Woops! 
I have deleted the content!\")\n f.close()\n\n if(self.sendNotifications):\n try: \n self.notifyDevice.send(self.modelNamePrefix + \" \" + message)\n except:\n print(\"notifyDevice error\")\n \n # def on_batch_end(self, batch, logs=None):\n # progress = batch/self.trainingSteps * 100\n # if(progress % 10 == 0):\n # try:\n # message = \"Ep. {0} {1}% done. {2}: {3:.3}%\".format(self.epoch+1, int(progress), self.metric, logs.get(self.metric)*100)\n # self.notifyDevice.send(message)\n # except:\n # print(\"notifyDevice error\")\n\ndef ParseEpoch(modelPath):\n filename = os.path.basename(modelPath)\n return int(filename.split(\"_\")[2])\n\ndef GetValidationData(testFiles, consts, batchesCount = 100, newDataGeneration = False):\n print(\"Gathering validation data...\")\n print(f\"Test files: {testFiles}\")\n\n if(newDataGeneration):\n PrintToLog(\"Use TestSequence for validation.\")\n\n assert(len(testFiles) == 1)\n seq = TestSequence(testFiles[0], consts, test = True)\n else:\n PrintToLog(\"Use TrainSequence for validation.\")\n\n seq = TrainSequence(testFiles, batchesCount, consts, dataAugmentation = False) \n \n if not consts.noFeature:\n ftsList = np.zeros((0, consts.npoints, consts.featureComponents), np.float32)\n ptsList = np.zeros((0, consts.npoints, 3), np.float32)\n lbsList = np.zeros((0, consts.npoints, consts.classCount), np.uint8)\n\n if(newDataGeneration):\n indexes = np.arange(min(batchesCount, len(seq)))\n np.random.shuffle(indexes)\n else:\n indexes = range(batchesCount)\n\n for i in indexes:\n if consts.noFeature:\n if(newDataGeneration):\n ptslbl = seq.__getitem__(i)\n else:\n pts, lbl = seq.__getitem__(i)\n ptslbl = [pts[0], lbl]\n \n ptsList = np.concatenate((ptsList, ptslbl[0]), 0)\n lbsList = np.concatenate((lbsList, ptslbl[1]), 0)\n else:\n if(newDataGeneration):\n ftsptslbl = seq.__getitem__(i)\n else:\n ftspts, lbl = seq.__getitem__(i)\n ftsptslbl = [ftspts[0], ftspts[1], lbl]\n \n ftsList = np.concatenate((ftsList, ftsptslbl[0]), 0)\n ptsList = 
np.concatenate((ptsList, ftsptslbl[1]), 0)\n lbsList = np.concatenate((lbsList, ftsptslbl[2]), 0)\n \n PrintToLog(f\"Generated {len(lbsList)} validation samples.\")\n\n if consts.noFeature:\n return (ptsList, lbsList)\n else:\n return ([ftsList, ptsList], lbsList)\n \ndef TrainModel(trainFiles, testFiles, consts : Const, modelPath = None, saveDir = Paths.dataPath, classes = None, first_epoch = 0, epochs = None, sendNotifications = False): \n model = None\n modelName = None\n if(modelPath != None):\n if(not isinstance(modelPath, list)):\n modelName = Const.ParseModelName(modelPath)\n if(consts.Name() != Const.RemoveUID(modelName)):\n modelName = consts.Name(consts.UID()) \n logSaveDir = saveDir + f\"/{modelName}/\"\n\n if(isinstance(modelPath, list)):\n model = FuseModels(modelPath, consts)\n else:\n model, modified = LoadModel(modelPath, consts)\n if(not modified):\n first_epoch = ParseEpoch(modelPath) +1\n else:\n if(consts.Fusion):\n model = FuseModels(None, consts)\n else:\n model = CreateModel(consts.classCount, 1 if consts.noFeature else consts.featureComponents, noColor=consts.noFeature)\n \n if(modelName is None or modelName == \"\"):\n modelName = consts.Name(consts.UID())\n logSaveDir = saveDir + f\"/{modelName}/\"\n\n PrintToLog(\"Train {} on {} files. 
Test on {} files\".format(modelName, len(trainFiles), len(testFiles)))\n PrintToLog(\"Validate on :\" + str(testFiles))\n\n trainingSteps = int((1000*16)/consts.batchSize) if not Const.IsWindowsMachine() else int(10)\n PrintToLog(\"Batch size: {}, trainingSteps: {}\".format(consts.batchSize, trainingSteps))\n\n logsPath = os.path.join(consts.logsPath, Const.RemoveUID(modelName))\n os.makedirs(logsPath, exist_ok=True)\n callbacks_list = [] \n callbacks_list.append(ModelSaveCallback(logSaveDir, trainingSteps, \"curb\", modelNamePrefix = modelName, sendNotifications=sendNotifications))\n # callbacks_list.append(IOUPerClass(logsPath, consts.classNames[1:], first_epoch+1))\n # callbacks_list.append(tf.keras.callbacks.TensorBoard(log_dir=logsPath, update_freq=\"batch\", histogram_freq=0, profile_batch = 0)) # tensorboard 2.0.2\n\n seq = TrainSequence(trainFiles, trainingSteps, consts)\n validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)\n validationData = None if len(testFiles) == 0 else GetValidationData(testFiles, consts, validationSteps)\n\n if(epochs is None):\n epochs = 20 if consts.Fusion else 100\n\n model.fit(seq, validation_data = validationData, epochs = epochs, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300, callbacks=callbacks_list, initial_epoch = first_epoch)\n\ndef EvaluateModels(modelsList, testFiles, consts, x = None, y = None):\n if(x is None or y is None):\n validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)\n x, y = GetValidationData(testFiles, consts, validationSteps, newDataGeneration = False)\n\n for file in modelsList:\n model, _ = LoadModel(file, consts)\n metrics = model.evaluate(x, y, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300)\n # print(f\"miou: {metrics[2][0][0]*100:.3}\")\n\ndef SaveModel(saveDir, epoch, model, train_score, val_score=0, modelNamePrefix = \"\"):\n if(modelNamePrefix != 
\"\"):\n modelNamePrefix += \"_\"\n fileName = saveDir+\"/{0}{1}{2}{3}.h5\".format(modelNamePrefix, epoch, f\"_train({train_score:.3})\", f\"_val({val_score:.3})\" if val_score != 0 else \"\")\n if(not os.path.isdir(saveDir)):\n os.mkdir(saveDir) \n if(os.path.exists(fileName)):\n os.remove(fileName) \n model.save(fileName, include_optimizer=False)\n\ndef RotatePointCloud(batch_data):\n \"\"\" Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, sinval, 0],\n [-sinval, cosval, 0],\n [0, 0, 1],])\n return np.dot(batch_data, rotation_matrix)\n\ndef JitterRGB(features):\n features = features.astype(np.uint8)\n assert(np.max(features) > 1)\n\n img = Image.fromarray(np.expand_dims(features,0), mode=\"RGB\")\n\n low = 0.4\n high = 1.6\n #1 is baseline\n img = ImageEnhance.Brightness(img).enhance(np.random.uniform(low, high))\n img = ImageEnhance.Color(img).enhance(np.random.uniform(low, high))\n img = ImageEnhance.Contrast(img).enhance(np.random.uniform(low, high))\n\n img = ImageEnhance.Sharpness(img).enhance(np.random.uniform(low, high))\n if(np.random.uniform(low, high) > 1):\n img = ImageOps.equalize(img) \n if(np.random.uniform(low, high) > 1):\n img = ImageOps.autocontrast(img)\n\n new_features = np.array(img).reshape((-1, 3))\n return new_features\n\ndef JitterReflectance(features, sigma=40): #input [0; 255]\n assert(features.shape[1] == 1)\n randJitters = np.random.randint(-sigma, sigma, size = features.shape)\n features += randJitters\n features = np.clip(features, 0, 255)\n return features\n\ndef JitterPoints(points, sigma=0.01):\n \"\"\" Randomly jitter points. 
jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n \"\"\" \n C = 3\n assert(points.shape[1] == C)\n\n randJitters = np.random.uniform(-sigma, sigma, size = points.shape)\n return points + randJitters\n\ndef Mirror(points, axis, min = True):\n if(min):\n axisValue = np.amin(points[:,axis])\n else:\n axisValue = np.amax(points[:,axis])\n\n distances = np.abs(points[:, axis] - axisValue)\n newpoints = np.array(points, copy=True)\n\n newpoints[:,axis] = newpoints[:,axis] + distances*(-2 if min else 2)\n return newpoints\n\ndef MirrorPoints(points): \n assert(len(points.shape) == 2 and points.shape[1] == 3)\n\n mirrorDirection = random.choice([\"xMin\", \"xMax\", \"yMin\", \"yMax\", \"\"])\n\n if(mirrorDirection == \"xMin\"):\n points = Mirror(points, 0, min = True)\n elif(mirrorDirection == \"xMax\"):\n points = Mirror(points, 0, min = False)\n elif(mirrorDirection == \"yMin\"):\n points = Mirror(points, 1, min = True)\n elif(mirrorDirection == \"yMax\"):\n points = Mirror(points, 1, min = False)\n \n return points\n\ndef ScalePoints(points, sigma = 0.02):\n \"\"\" Scale up or down random by small percentage\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, scaled batch of point clouds\n \"\"\"\n assert(points.shape[1]==3)\n\n scale = np.random.uniform(1-sigma, 1+sigma)\n scale_matrix = np.array([[scale, 0, 0],\n [0, scale, 0],\n [0, 0, scale]])\n scaled = np.dot(points, scale_matrix)\n\n return scaled\n\nclass TrainSequence(Sequence):\n def __init__(self, filelist, iteration_number, consts : Const, dataAugmentation = True):\n self.filelist = filelist\n self.ptsList = [np.load(file) for file in self.filelist]\n self.ptsList = sorted(self.ptsList, key=len)\n self.ptsListCount = np.cumsum([len(pts) for pts in self.ptsList])\n\n self.cts = consts\n self.dataAugmentation = dataAugmentation\n self.iterations = iteration_number\n\n def __len__(self):\n return 
int(self.iterations)\n\n def PickRandomPoint(self, lbl):\n lblIdx = []\n\n while True:\n randClass = random.randint(0, self.cts.classCount-1)\n lblIdx = np.where(lbl == randClass)[0]\n\n if(len(lblIdx) >= 2):\n break\n\n return lblIdx[random.randint(0, len(lblIdx)-1)] \n\n def __getitem__(self, _):\n if not self.cts.noFeature:\n ftsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.featureComponents), np.float32) \n ptsList = np.zeros((self.cts.batchSize, self.cts.npoints, 3), np.float32)\n lbsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.classCount), np.uint8)\n \n for i in range(self.cts.batchSize):\n # load the data\n ptIdx = random.randint(0, self.ptsListCount[-1])\n pts = self.ptsList[np.argmax(self.ptsListCount >= ptIdx)]\n \n # if(self.cts.featureComponents == 1):\n # keepPts = (pts[:, 4] != 0)\n # else:\n # keepPts = (pts[:, 6] != 0)\n # pts = pts[keepPts]\n\n # get the features\n if(self.cts.featureComponents == 1):\n if not self.cts.noFeature: \n fts = np.expand_dims(pts[:,3], 1).astype(np.float32)\n lbs = pts[:,4].astype(int)\n else:\n if not self.cts.noFeature:\n fts = pts[:,3:6].astype(np.float32)\n lbs = pts[:,6].astype(int)\n\n if(np.min(lbs) == 1):\n lbs -= 1 #class 0 is filtered out\n \n # get the point coordinates\n pts = pts[:, :3]\n\n # pick a random point\n pt_id = random.randint(0, pts.shape[0]-1)\n pt = pts[pt_id]\n\n # create the mask\n mask_x = np.logical_and(pts[:,0]<pt[0]+self.cts.blocksize/2, pts[:,0]>pt[0]-self.cts.blocksize/2)\n mask_y = np.logical_and(pts[:,1]<pt[1]+self.cts.blocksize/2, pts[:,1]>pt[1]-self.cts.blocksize/2)\n mask = np.logical_and(mask_x, mask_y)\n temppts = pts[mask]\n templbs = lbs[mask]\n if not self.cts.noFeature:\n tempfts = fts[mask]\n \n # random selection\n choice = np.random.choice(temppts.shape[0], self.cts.npoints, replace=True)\n temppts = temppts[choice] \n if not self.cts.noFeature: \n tempfts = tempfts[choice]\n\n templbs = templbs[choice]\n encodedLbs = 
np.zeros((len(templbs), self.cts.classCount))\n encodedLbs[np.arange(len(templbs)),templbs] = 1\n templbs = encodedLbs\n\n # if self.dataAugmentation:\n # dt = DataTool()\n # dt.VisualizePointCloudAsync([temppts], [tempfts/255])\n\n # data augmentation\n if self.dataAugmentation:\n if(self.cts.Mirror):\n temppts = MirrorPoints(temppts)\n if(self.cts.Rotate):\n temppts = RotatePointCloud(temppts)\n if(self.cts.Scale):\n temppts = ScalePoints(temppts, sigma = 0.02)\n if(self.cts.Jitter):\n temppts = JitterPoints(temppts, sigma = 0.01)\n\n if(not self.cts.noFeature and self.cts.FtrAugment):\n if(self.cts.featureComponents == 3):\n tempfts = JitterRGB(tempfts)\n elif(self.cts.featureComponents == 1):\n tempfts = JitterReflectance(tempfts)\n \n if(not self.cts.noFeature):\n tempfts = tempfts.astype(np.float32)\n tempfts = tempfts/255 # - 0.5\n \n # if self.dataAugmentation:\n # # visualize data\n # dt = DataTool()\n # dt.VisualizePointCloud([temppts], [tempfts], windowName = \"Augmented\")\n # linePoints = np.where(templbs[:, 1] == 1)[0]\n # DataTool().VisualizePointCloud([np.delete(temppts, linePoints, axis=0), temppts[linePoints]], [[0,0,1], [1,0,0]], windowName=\"Sampled\")\n\n if not self.cts.noFeature:\n ftsList[i] = np.expand_dims(tempfts, 0)\n ptsList[i] = np.expand_dims(temppts, 0)\n lbsList[i] = np.expand_dims(templbs, 0)\n \n if self.cts.noFeature:\n return [ptsList], lbsList\n else: # works for RGB and fusion models\n return [ftsList, ptsList], lbsList\n\nclass TestSequence(Sequence):\n def __init__(self, filename, consts, splitDataSetToParts = -1, windowsMachineCap = True, test = False):\n self.filename = filename\n self.batchSize = consts.batchSize\n self.npoints = consts.npoints\n self.nocolor = consts.noFeature\n self.bs = consts.blocksize\n self.featureComponents = consts.featureComponents\n self.fusion = consts.Fusion\n self.test = test\n\n if(self.test):\n self.classCount = consts.classCount\n self.lbl = []\n\n if(self.filename.endswith(\".ply\")):\n 
from plyfile import PlyData\n plydata = PlyData.read(self.filename)\n x = plydata[\"vertex\"].data[\"x\"].astype(np.float32)\n y = plydata[\"vertex\"].data[\"y\"].astype(np.float32)\n z = plydata[\"vertex\"].data[\"z\"].astype(np.float32)\n fts = plydata[\"vertex\"].data[\"reflectance\"].astype(np.float32)\n self.xyzrgb = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1), np.expand_dims(fts, 1)), axis=1)\n elif(self.filename.endswith(\".npy\")):\n xyzftsl = np.load(self.filename)\n if(xyzftsl.shape[1] == 5):\n self.xyzrgb = xyzftsl[:, :4]\n if(self.test):\n self.lbl = xyzftsl[:, 4] - 1\n else: #if(xyzftsl.shape[1] == 7):\n self.xyzrgb = xyzftsl[:, :6]\n if(self.test):\n self.lbl = xyzftsl[:, 6] - 1\n elif(self.filename.endswith(\".las\")):\n from dataTool import ReadXYZRGB \n xyz, rgb = ReadXYZRGB(self.filename)\n self.xyzrgb = np.concatenate((xyz, rgb), 1)\n\n print(\"Test_step:\", consts.test_step)\n step = consts.test_step\n discretized = ((self.xyzrgb[:,:2]).astype(float)/step).astype(int)\n self.allpts = np.unique(discretized, axis=0)\n self.allpts = self.allpts.astype(np.float)*step\n\n if(consts.IsWindowsMachine() and windowsMachineCap):\n self.allpts = self.allpts[:115] #small sample for testing\n\n self.splitDataSetToParts = splitDataSetToParts\n if(self.splitDataSetToParts != -1):\n self.ptIndex = 0\n else:\n self.pts = self.allpts\n self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)\n\n self.sparseCubes = 0\n self.sparseCubesPtCount = 0\n\n def LenParts(self):\n if(self.splitDataSetToParts != -1):\n return math.ceil(len(self.allpts)/self.splitDataSetToParts)\n else:\n return 1\n\n def NextPart(self):\n if(self.splitDataSetToParts <= 0):\n return False\n if(self.ptIndex >= len(self.allpts)):\n return False\n\n self.nextIndex = np.min([self.ptIndex+self.splitDataSetToParts, len(self.allpts)])\n self.pts = self.allpts[self.ptIndex : self.nextIndex]\n self.ptIndex = self.nextIndex\n\n self.idxList = 
np.zeros((len(self.pts), self.npoints), np.int64)\n return True\n\n def __len__(self):\n return math.ceil(len(self.pts)/self.batchSize)\n\n def compute_mask(self, pt, bs):\n # build the mask\n mask_x = np.logical_and(self.xyzrgb[:,0]<pt[0]+bs/2, self.xyzrgb[:,0]>pt[0]-bs/2)\n mask_y = np.logical_and(self.xyzrgb[:,1]<pt[1]+bs/2, self.xyzrgb[:,1]>pt[1]-bs/2)\n mask = np.logical_and(mask_x, mask_y)\n return mask \n\n def __getitem__(self, index):\n size = min(self.batchSize, len(self.pts) - (index * self.batchSize))\n\n if not self.nocolor:\n ftsList = np.zeros((size, self.npoints, self.featureComponents), np.float32)\n ptsList = np.zeros((size, self.npoints, 3), np.float32)\n if(self.test):\n lblList = np.zeros((size, self.npoints, self.classCount), np.float32)\n \n for i in range(size):\n # get the data \n mask = self.compute_mask(self.pts[index*self.batchSize + i], self.bs)\n pts = self.xyzrgb[mask]\n\n if(self.test):\n lbl = self.lbl[mask]\n\n if(len(pts) < self.npoints):\n self.sparseCubes += 1\n self.sparseCubesPtCount += len(pts)\n\n # choose right number of points\n choice = np.random.choice(pts.shape[0], self.npoints, replace=True)\n pts = pts[choice]\n if(self.test):\n lbl = lbl[choice]\n\n # labels will contain indices in the original point cloud\n idx = np.where(mask)[0][choice]\n self.idxList[index*self.batchSize + i] = np.expand_dims(idx, 0)\n\n # separate between features and points\n if not self.nocolor:\n if(self.featureComponents == 1):\n fts = np.expand_dims(pts[:,3], 1)\n else:\n fts = pts[:,3:6]\n fts = fts/255 #- 0.5\n\n pts = pts[:, :3].copy()\n\n if not self.nocolor:\n ftsList[i] = np.expand_dims(fts, 0)\n ptsList[i] = np.expand_dims(pts, 0)\n if self.test:\n lblList[i, np.arange(len(lbl)), lbl.astype(int)] = 1\n\n add_lbl = []\n if self.test:\n add_lbl = [lblList]\n\n if self.nocolor:\n return [ptsList] + add_lbl\n else: #works for RGB\n return [ftsList, ptsList] + add_lbl\n\ndef GenerateData(modelPath, testFiles, consts, outputFolder, 
NameIncludeModelInfo = False):\n model, _ = LoadModel(modelPath, consts)\n\n if(not NameIncludeModelInfo):\n outputFolder = os.path.join(outputFolder, Paths.FileName(modelPath))\n os.makedirs(outputFolder, exist_ok=True)\n\n for file in testFiles:\n t = time()\n\n baseName = Paths.FileName(file)\n if(NameIncludeModelInfo):\n baseName = baseName + \"_\" + Paths.FileName(modelPath)\n baseName += \".txt\"\n\n newFile = os.path.join(outputFolder, baseName)\n if(os.path.exists(newFile)):\n print(\"All ready exists: \",newFile)\n continue\n else:\n open(newFile, \"a\").close()\n\n print(\"Generating: \", newFile)\n GenerateFile(model, file, consts, newFile)\n print(\"Done in {:02d}:{:02d} min.\".format(int((time() - t)/60), int((time() - t)%60)))\n\ndef GenerateLargeData(modelPath, voxelFiles, consts, outputFolder, orgFiles = None, replace = False, Upscale = True, NameIncludeModelInfo = False):\n from time import time\n\n model, _ = LoadModel(modelPath, consts)\n\n if(not NameIncludeModelInfo):\n outputFolder = outputFolder + Paths.FileName(modelPath)\n if not Upscale:\n outputFolder = outputFolder+\"/vox_lbl/\"\n os.makedirs(outputFolder, exist_ok=True)\n\n if isinstance(voxelFiles, str):\n voxelFiles = Paths.GetFiles(voxelFiles)\n \n if isinstance(orgFiles, str):\n orgFiles = Paths.GetFiles(orgFiles)\n \n for voxelFile in voxelFiles:\n baseName = Paths.FileName(voxelFile).replace(\"_voxels\", \"\")\n\n if not (orgFiles is None):\n orgFile = [f for f in orgFiles if Paths.FileName(f).startswith(baseName)]\n if(len(orgFile) != 1):\n print(\"Skip: \", voxelFile)\n continue\n orgFile = orgFile[0]\n else:\n orgFile = None\n\n t = time()\n\n if(NameIncludeModelInfo):\n baseName = baseName + \"_\" + Paths.FileName(modelPath)\n \n if Upscale: \n newFile = os.path.join(outputFolder, baseName+\".labels\")\n else: \n newFile = os.path.join(outputFolder, baseName+\".npy\")\n if(not replace and os.path.exists(newFile)):\n print(newFile,\" already exists.\")\n continue\n \n flagFile 
= newFile+\".tmp\"\n if(os.path.exists(flagFile)):\n print(\"Other worker is generating: \", newFile)\n continue\n else:\n open(flagFile, \"a\").close()\n\n print(\"Generating: \", newFile)\n GenerateLargeFile(model, voxelFile, orgFile, consts, newFile, Upscale = Upscale)\n\n os.remove(flagFile)\n print(\"{} generated in {:02d}:{:02d} min.\".format(baseName, int((time() - t)/60), int((time() - t)%60)))\n\ndef GenerateFile(model, file, consts, outputFile, saveScores = True):\n seq = TestSequence(file, consts)\n output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)\n\n # for y in range(len(seq)):\n # pts = seq.__getitem__(y)\n # pts = pts[0]\n # pred = model.predict(pts)\n\n # for i in range(len(pred)):\n # predPtsIdx = np.where(np.argmax(pred[i], axis = 1) == 1)[0]\n # # truePtsIdx = np.where(np.argmax(lbl[i], axis = 1) == 1)[0]\n \n # # print(f\"True curb points: {len(truePtsIdx)}. Found curb points: {len(predPtsIdx)}\")\n # DataTool().VisualizePointCloud([np.delete(pts[i], predPtsIdx, axis=0), pts[i][predPtsIdx]], [[0,0,1], [1,0,0]])\n\n idx = seq.idxList\n xyzrgb = seq.xyzrgb[:,:3]\n scores = np.zeros((xyzrgb.shape[0], consts.classCount))\n\n for i in range(len(output)):\n scores[idx[i]] += output[i] \n\n mask = np.logical_not(scores.sum(1)==0)\n scores = scores[mask]\n pts_src = xyzrgb[mask]\n\n # create the scores for all points\n indexes = nearest_correspondance(pts_src.astype(np.float32), xyzrgb.astype(np.float32), K=1)\n scores = scores[indexes]\n \n if saveScores:\n scoresFile = outputFile.replace(\".txt\", \"_scores.npy\")\n np.save(scoresFile, scores)\n print(f\"Scores saved to: {scoresFile}\")\n \n scores = scores.argmax(1) + 1 #because all class are shifted to avoid 0 - unclassified\n \n print(f\"class 0: {len(np.where(scores == 0)[0])}, class 1: {len(np.where(scores == 1)[0])}\")\n\n import pandas as pd\n print(\"Save labels: \", scores.shape)\n pd.DataFrame(scores, dtype=np.uint8).to_csv(outputFile, sep='\\t', 
header=None, index=None)\n\ndef SaveLabelsPnts(labels, outputFile):\n import pandas as pd \n print(\"Saving pts lbs...\")\n if(len(labels.shape) == 1):\n pd.DataFrame(labels, dtype=np.uint8).to_csv(outputFile, sep='\\t', header=None, index=None)\n else:\n np.save(outputFile, labels)\n print(\"Pts lbs {} saved!\".format(labels.shape))\n\ndef UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile = None):\n from tqdm import tqdm\n # create the scores for all points\n step = 10000000 #1000000\n fullLbl = np.zeros((0,), np.int8)\n print(\"KDTree magic. Source pts: {}. Queary pts: {}\".format(len(pts_src), len(originalPoints)))\n for i in tqdm(range(0, math.ceil(len(originalPoints)/step))):\n a = i*step\n b = a + np.min([len(originalPoints)-a, step])\n indexes = nearest_correspondance(pts_src, originalPoints[a:b], K=1)\n fullLbl = np.concatenate([fullLbl, lbl[indexes]], 0)\n\n if(not (outputFile is None)):\n SaveLabelsPnts(fullLbl, outputFile)\n else:\n return fullLbl\n\ndef GenerateLargeFile(model, voxelFile, originalFile, consts, outputFile, Upscale = True, saveScores = True):\n from dataTool import ReadXYZ\n from tqdm import tqdm\n\n seq = TestSequence(voxelFile, consts, splitDataSetToParts=16000)\n print(\"All pts: \", len(seq.allpts))\n\n xyzrgb = seq.xyzrgb[:,:3]\n scores = np.zeros((xyzrgb.shape[0], consts.classCount))\n\n for _ in tqdm(range(seq.LenParts())):\n seq.NextPart()\n output = model.predict(seq, workers = consts.batchSize, max_queue_size = 300, verbose = 1)\n\n idx = seq.idxList\n for i in range(len(output)):\n scores[idx[i]] += output[i]\n\n mask = np.logical_not(scores.sum(1)==0)\n scores = scores[mask]\n pts_src = xyzrgb[mask].astype(np.float32)\n\n if saveScores:\n scoresFile = os.path.splitext(outputFile)[0]+\"_scores.npy\"\n np.save(scoresFile, scores)\n print(f\"Scores saved to: {scoresFile}\")\n\n lbl = scores.argmax(1)\n \n if(Upscale and not (originalFile is None)):\n print(\"Load original file: \", originalFile)\n originalPoints = 
ReadXYZ(originalFile).astype(np.float32)\n assert(originalPoints.shape[1] == 3)\n UpscaleToOriginal(originalPoints, pts_src, lbl, outputFile)\n else: \n SaveLabelsPnts(np.concatenate([pts_src, np.expand_dims(lbl, 1)], axis=1), outputFile)\n\ndef UpscaleFilesAsync(modelPath, voxelFolder, orgFolder, savePath):\n import time\n # notifyDevice = Notify()\n\n savePath = savePath + Paths.FileName(modelPath)\n\n print(f\"Searching in folder: {savePath+'/vox_lbl/'}\")\n\n while True:\n found = False\n\n fileNames = Semantic3D.fileNames\n for file in Paths.GetFiles(savePath, onlyNames=True, withoutExtension=True, findExtesions=('.labels')):\n if(file in fileNames or fileNames.values()):\n fileNames = {key:val for key, val in fileNames.items() if val != file and key != file}\n \n if(len(fileNames) == 0): \n print(\"Done upscaling files\")\n # notifyDevice.send(\"Done upscaling files\")\n return\n\n for file in Paths.GetFiles(savePath+\"/vox_lbl/\", onlyNames=True, withoutExtension=True, findExtesions=('.npy')): \n ptslbs = os.path.join(savePath+\"/vox_lbl/\", file+\".npy\")\n # originalFile = os.path.join(orgFolder, file+\".npy\")\n originalFile = os.path.join(orgFolder, file+\".hdf5\")\n outputFile = os.path.join(savePath, file+\".labels\")\n\n if(not os.path.exists(outputFile)):\n found = True\n open(outputFile, \"a\").close()\n UpscaleFile(ptslbs, originalFile, outputFile)\n \n if not found:\n time.sleep(10) #sleep for 10 second and scan for job again\n\ndef UpscaleFile(ptslbsFile, originalFile, outputFile):\n from dataTool import ReadLabels, ReadXYZ\n\n print(\"Upscaling: {}\".format(ptslbsFile))\n scores = ReadLabels(ptslbsFile, readFormat = \".npy\")\n scores = np.squeeze(scores, 1)\n pts_src = ReadXYZ(ptslbsFile, readFormat = \".npy\")\n originalPoints = ReadXYZ(originalFile)\n\n UpscaleToOriginal(originalPoints, pts_src, scores, outputFile)\n\ndef nearest_correspondance(pts_src, pts_dest, K=1):\n # print(\"KDTree magic. Source pts: {}. 
Queary pts: {}\".format(len(pts_src), len(pts_dest)))\n # t = time()\n kdt = KDTree(pts_src, leaf_size=20)\n _, indexes = kdt.query(pts_dest, k = K)\n # print(\"Done in {}:{} min.\".format(int((time() - t)/60), int((time() - t)%60))) \n return np.squeeze(indexes, 1)\n\ndef TestTestSequence(path, consts): \n seq = TestSequence(path, consts)\n\n allPts = np.zeros((len(seq.xyzrgb), 3))\n\n for i in range(len(seq)):\n inpt = seq[i]\n\n ftsList = inpt[0]\n ptsList = inpt[1]\n\n for j in range(len(ptsList)):\n allPts[seq.idxList[i*consts.batchSize + j]] = ptsList[j]\n \n emptyPts = np.logical_not(allPts.sum(1) != 0)\n\n print(\"sparseCubes: \",seq.sparseCubes)\n print(\"mean sparseCubes pt count: \", seq.sparseCubesPtCount/seq.sparseCubes)\n print(\"Not picked points: {} => {:.2f}%\".format(len(emptyPts), len(emptyPts)/len(allPts)))\n\n nonEmptyPts = np.logical_not(emptyPts)\n\n a = seq.xyzrgb[emptyPts]\n b = seq.xyzrgb[nonEmptyPts]\n\n dt = DataTool()\n dt.VisualizePointCloud([a, b], [[1,0,0], None])\n\nif(os.path.exists(\"C:/Program Files\")):\n import open3d as o3d\n import time\n from dataTool import LoadRenderOptions, SaveRenderOptions, GetPointsIndexInBoundingBox, GetPointsInBoundingBox\n\nclass BoxesIterator:\n def __init__(self, boxes, points, colors, labels):\n # self.pc = o3d.geometry.PointCloud()\n # self.pc.points = o3d.utility.Vector3dVector(points)\n self.src_points = points\n self.src_colors = colors if np.max(colors) <= 1 else colors/255\n self.src_labels = labels\n self.dst_points = np.zeros((0, 3), dtype = np.float)\n self.dst_colors = np.zeros((0, 3), dtype = np.float)\n self.boxes = boxes\n self.i = 0\n # self.kdt = KDTree(points, leaf_size=20) \n\n self.trajectory = None\n # if(os.path.exists(\"./data/camera_trajectory.json\")):\n # self.trajectory = o3d.io.read_pinhole_camera_trajectory(\"./data/camera_trajectory.json\").parameters\n # self.trajectory_i = 0\n # self.trajectory_time = time.time()\n\n grey = np.array([128, 128, 128])/255\n red = 
np.array([136, 0, 1])/255\n mint = np.array([170, 255, 195])/255\n teal = np.array([0, 128, 128])/255\n green = np.array([60, 180, 75])/255\n verygreen = np.array([0, 255, 0])/255\n brown = np.array([170, 110, 40])/255\n # white = np.array([255, 255, 255])/255\n black = np.array([0, 0, 0])/255\n blue = np.array([0, 0, 255])/255 \n pink = np.array([255, 56, 152])/255 \n\n #NPM3D\n self.colors = []\n if(np.max(self.src_labels) == 9):\n self.colors = [grey, red, blue, teal, mint, brown, pink, black, green]\n #Semantic3D\n elif(np.max(self.src_labels) == 8):\n self.colors = [grey, verygreen, green, mint, red, blue, brown, black]\n \n self.pc = o3d.geometry.PointCloud() \n self.pc.points = o3d.utility.Vector3dVector(self.src_points)\n\n self.box = o3d.geometry.LineSet()\n lines = np.array([[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]])\n self.box.lines = o3d.utility.Vector2iVector(lines)\n self.box.colors = o3d.utility.Vector3dVector(np.array([[1,0,0] for _ in range(len(lines))]))\n\n self.initSet = False\n\n def ColorPtsByClass(self, pts, lbl):\n pts_colors = np.zeros((len(pts), 3), np.float)\n\n for i in range(0, len(self.colors)):\n indexes = np.where(lbl == i+1)[0]\n pts_colors[indexes] = self.colors[i]\n\n return pts_colors\n \n def BoxPts(self, bBox):\n box = [[bBox[0], bBox[2], bBox[4]], \n [bBox[1], bBox[2], bBox[4]], \n [bBox[0], bBox[3], bBox[4]], \n [bBox[1], bBox[3], bBox[4]],\n [bBox[0], bBox[2], bBox[5]], \n [bBox[1], bBox[2], bBox[5]], \n [bBox[0], bBox[3], bBox[5]], \n [bBox[1], bBox[3], bBox[5]]]\n return np.array(box)\n\n def AnimationFunction(self, vis):\n # time.sleep(0.2)\n if(self.i < len(self.boxes)): \n pts = self.src_points[:, :2]\n mask_x = np.logical_and(self.boxes[self.i][0]<pts[:,0], pts[:,0]<self.boxes[self.i][1])\n mask_y = np.logical_and(self.boxes[self.i][2]<pts[:,1], pts[:,1]<self.boxes[self.i][3])\n ptsIdx = np.where(np.logical_and(mask_x, mask_y))[0]\n randIdx = np.random.choice(ptsIdx, 
min(8192, len(ptsIdx)), replace=False)\n \n self.dst_points = np.concatenate((self.dst_points, self.src_points[randIdx]), axis = 0)\n self.dst_colors = np.concatenate((self.dst_colors, self.ColorPtsByClass(self.src_points[randIdx], self.src_labels[randIdx])), axis = 0)\n\n self.src_points = np.delete(self.src_points, randIdx, axis = 0)\n self.src_labels = np.delete(self.src_labels, randIdx, axis = 0)\n self.src_colors = np.delete(self.src_colors, randIdx, axis = 0)\n \n self.pc.points = o3d.utility.Vector3dVector(np.concatenate((self.src_points, self.dst_points), 0))\n self.pc.colors = o3d.utility.Vector3dVector(np.concatenate((self.src_colors, self.dst_colors), 0))\n\n self.box.points = o3d.utility.Vector3dVector(self.BoxPts(self.boxes[self.i]))\n\n vis.clear_geometries()\n vis.add_geometry(self.pc, False)\n vis.add_geometry(self.box, False)\n \n self.i += 1 \n # print(f\"{self.i}/{len(self.boxes)}\", end=\"\\r\")\n else:\n print(\"Iteration over.\")\n\n if(not os.path.exists(\"./data/camera_trajectory.json\")):\n self.trajectory = None\n\n if(self.trajectory is None):\n # vis = LoadRenderOptions(vis, returnVis=True)\n if(os.path.exists(\"./data/camera_trajectory.json\")):\n self.trajectory = o3d.io.read_pinhole_camera_trajectory(\"./data/camera_trajectory.json\").parameters\n self.trajectory_i = 0\n self.trajectory_time = time.time() \n else:\n ctr = vis.get_view_control()\n ctr.convert_from_pinhole_camera_parameters(self.trajectory[self.trajectory_i])\n if(self.trajectory_i < len(self.trajectory)-1): #and time.time() - self.trajectory_time > 1\n print(f\"Trajectory: {self.trajectory_i}/{len(self.trajectory)}\", end=\"\\r\")\n self.trajectory_i += 1\n self.trajectory_time = time.time()\n\n return False\n\ndef ShowSequenceBoxes(ptsFile, lblFile, consts):\n from dataTool import DataTool\n\n consts.test_step = 4\n seq = TestSequence(ptsFile, consts, windowsMachineCap=False)\n\n minZ = np.min(seq.xyzrgb[:,2])\n maxZ = np.max(seq.xyzrgb[:,2])\n\n boxes = []\n for pt 
in seq.pts:\n minX = pt[0] - consts.blocksize/2\n maxX = pt[0] + consts.blocksize/2\n \n minY = pt[1] - consts.blocksize/2\n maxY = pt[1] + consts.blocksize/2\n\n boxes.append([minX, maxX, minY, maxY, minZ, maxZ])\n\n dt = DataTool()\n # dt.VisualizePointCloud([seq.xyzrgb[:,:3]], [seq.xyzrgb[:,3:6]], bBoxes = boxes)\n boxesitr = BoxesIterator(boxes, seq.xyzrgb[:,:3], seq.xyzrgb[:,3:], np.squeeze(ReadLabels(lblFile),1))\n dt.VisualizePointCloud([seq.xyzrgb[:,:3]], animationFunction=boxesitr.AnimationFunction)\n # dt.VisualizePointCloud([seq.xyzrgb[:,:3]])\n\ndef RunExperiments():\n from dataTool import VisualizePointCloudClassesAsync, VisualizePointCloudClasses, ReadLabels, DataTool, ReadXYZ\n # testCloud = \"G:/PointCloud DataSets/NPM3D/test_10_classes/ajaccio_2.ply\"\n # testCloud = consts.Paths.processedTrain+\"/Lille1_1_0.npy\"\n # VisualizePointCloudClassesAsync(testCloud, downSample=False, windowName=\"Keras\")\n # VisualizePointCloudClassesAsync(testCloud, \"G:/PointCloud DataSets/NPM3D/generatedResults/ajaccio_2.txt\", downSample=False, windowName=\"Keras\")\n # VisualizePointCloudClassesAsync(testCloud, \"G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/ajaccio_2.txt\", downSample=False, windowName=\"Torch\")\n\n # TestTestSequence(consts.Paths.processedTrain+\"/Lille1_1_0.npy\", consts)\n # ShowSequenceBoxes(consts.Paths.processedTrain+\"/Lille1_1_0.npy\", consts)\n\n # # pts = ReadXYZ(consts.Paths.processedTrain+\"/Lille2_0.npy\")\n # true = ReadLabels(consts.Paths.processedTrain+\"/Lille2_0.npy\")\n\n # # pts = ReadXYZ(consts.Paths.rawTrain+\"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5\")\n # # true = ReadLabels(consts.Paths.rawTrain+\"/untermaederbrunnen_station3_xyz_intensity_rgb.hdf5\")\n\n # # pred_file = \"G:/PointCloud DataSets/NPM3D/torch_generated_data/results88.2%/Lille2_0.txt\"\n # pred_file = consts.Paths.generatedTest+\"/\"+Paths.FileName(modelPath)+\"/Lille2_0.txt\"\n # # pred_file = 
consts.Paths.generatedTest+\"/\"+Paths.FileName(modelPath)+\"/untermaederbrunnen_station3_xyz_intensity_rgb.labels\"\n # pred = ReadLabels(pred_file)\n \n # VisualizePointCloudClasses(consts.Paths.processedTrain+\"/Lille2_0.npy\",\n # pred_file,\n # downSample=False, windowName=\"Red error\",\n # errorPoints = ((true != pred) == (true != 0)),\n # delPoints = (true == 0))\n\n # error = np.where(true == 0)[0]\n # true = np.delete(true, error, 0)\n # pred = np.delete(pred, error, 0)\n\n # from sklearn.metrics import confusion_matrix\n # import metrics\n # cm = confusion_matrix(true, pred, labels=list(range(consts.classCount)))\n # iou = metrics.stats_iou_per_class(cm)\n # print(\"Mean iou:\", iou[0])\n # print(\"iou per class:\", iou[1])\n\n from dataTool import ReadXYZ, ReadLabels\n from sklearn.metrics import confusion_matrix\n from metrics import stats_accuracy_per_class, stats_iou_per_class\n\n src_pts = ReadXYZ(r\"G:\\PointCloud DataSets\\semantic3d\\rawTrain\\bildstein_station3_xyz_intensity_rgb.hdf5\")\n src_lbl = ReadLabels(r\"G:\\PointCloud DataSets\\semantic3d\\rawTrain\\bildstein_station3_xyz_intensity_rgb.hdf5\")\n src_lbl = np.squeeze(src_lbl, 1)\n\n delIndices = np.where(src_lbl == 0)\n src_pts = np.delete(src_pts, delIndices, axis=0)\n src_lbl = np.delete(src_lbl, delIndices, axis=0)\n\n voxel_pts = ReadXYZ(r\"G:\\PointCloud DataSets\\semantic3d\\processedTrain(0.15m)\\bildstein_station3_xyz_intensity_rgb_voxels.npy\")\n voxel_lbl = ReadLabels(r\"G:\\PointCloud DataSets\\semantic3d\\processedTrain(0.15m)\\bildstein_station3_xyz_intensity_rgb_voxels.npy\")\n voxel_lbl = np.squeeze(voxel_lbl, 1)\n\n upscaled_lbl = UpscaleToOriginal(src_pts, voxel_pts, voxel_lbl)\n\n cm = confusion_matrix(src_lbl, upscaled_lbl)\n avg_acc, avg_class = stats_accuracy_per_class(cm)\n avg_iou, avg_iou_class = stats_iou_per_class(cm)\n\ndef RenameSemantic3DFiles(folder):\n if(len(Paths.GetFiles(folder, findExtesions = \".labels\")) == 0):\n print(\"No files found.\")\n 
return\n\n for file in Paths.GetFiles(folder, findExtesions = \".labels\"):\n if(Paths.FileName(file).endswith(\"(1)\")):\n os.remove(file)\n else:\n name = Paths.FileName(file)\n newFileName = file.replace(name, Semantic3D.fileNames[name])\n os.rename(file, newFileName)\n\n if(os.path.getsize(newFileName) == 0):\n print(f\"{newFileName} if 0 bytes size\")\n \n if(len(Paths.GetFiles(folder, findExtesions = \".labels\")) != 15):\n print(\"Wrong number of files.\")\n else:\n print(\"Done renaming: \", folder)\n\nif __name__ == \"__main__\":\n from NearestNeighbors import NearestNeighborsLayer, SampleNearestNeighborsLayer\n from KDTree import KDTreeLayer, KDTreeSampleLayer\n modelPath = None\n\n # consts = NPM3D()\n # consts = Semantic3D()\n consts = Curbs()\n\n consts.noFeature = True\n # consts.Fusion = True\n # consts.Scale = True\n consts.Rotate = True\n # consts.Mirror = True\n # consts.Jitter = True\n # consts.FtrAugment = True\n\n testFiles = consts.TestFiles()\n trainFiles = consts.TrainFiles()\n\n modelPath = \"Sem3D(vox)(fusion)(FullAugment)_3_train(86.2)_val(79.5).h5\"\n # modelPath = \"Curbs(7&1)(noFeature)(Rotate)_21bdbe6aa82d4e259526ab46577e795a_25_train(75.1)_val(60.7).h5\"\n # modelPath = [\"Sem3D(vox)(RGB)(FullAugment)_55_train(85.7)_val(79.9)\", \"Sem3D(NOCOL)_50_train(87.4)_val(69.1)\"]\n # modelPath = [\"NPM3D(80&5)(RGB)(NoScale)_28_train(88.3)_val(73.2).h5\", \"NPM3D(80&5)(NOCOL)(FullAugment)_28_train(87.3)_val(71.5).h5\"]\n # modelPath = LatestModel(\"Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)\")\n # modelPath = LatestModel(consts.Name()) \n\n if(isinstance(modelPath,list)):\n consts.Fusion = True\n\n if(not consts.Fusion and not Const.IsWindowsMachine()):\n tf.config.optimizer.set_jit(True) #Gives more than 10% boost!!!\n print(\"XLA enabled.\")\n\n # modelPath = [\"Sem3D(14&1)(noFeature)(Scale)(Rotate)(Mirror)(Jitter)_9bbee708a7814063af9d85070452abd8_59_train(85.2)_val(72.8)\", \n # 
\"Sem3D(14&1)(noFeature)(Rotate)(Mirror)(Jitter)_ff2eb229084247d9a1c63caa519e9890_58_train(84.9)_val(75.5)\",\n # \"Sem3D(14&1)(noFeature)_dffc17f77e924894bbdbdad818ab6994_40_train(85.1)_val(68.8)\"]\n # EvaluateModels([modelPath], testFiles, consts)\n\n TrainModel(trainFiles, testFiles, consts, modelPath = modelPath)# , epochs = 8) #continue train\n # TrainModel(trainFiles, testFiles, consts) #new model\n\n # modelPath = HighestValMIOUModel(\"NPM3D(80&5)(fusion)(FullAugment)\")\n\n #NPM3D\n # GenerateData(modelPath, Paths.GetFiles(consts.Paths.rawTest), consts, consts.Paths.generatedTest)\n \n #Semantic3D \n # GenerateLargeData(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, consts, consts.Paths.generatedTest, Upscale=False)\n # UpscaleFilesAsync(modelPath, Paths.Semantic3D.processedTest, Paths.Semantic3D.rawTest, Paths.Semantic3D.generatedTest)\n # RenameSemantic3DFiles(Paths.Semantic3D.generatedTest + Paths.FileName(modelPath))\n\n #Curbs\n EvaluateModels([modelPath], testFiles, consts)\n # GenerateData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+\"/generated/\")\n GenerateLargeData(modelPath, testFiles, consts, consts.Paths.pointCloudPath+\"/generated/\")"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.dot",
"tensorflow.linalg.diag_part",
"tensorflow.concat",
"numpy.expand_dims",
"numpy.amax",
"tensorflow.zeros",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.reduce_sum",
"numpy.squeeze",
"tensorflow.cast",
"sklearn.neighbors.KDTree",
"tensorflow.summary.create_file_writer",
"tensorflow.equal",
"tensorflow.keras.losses.categorical_crossentropy",
"numpy.concatenate",
"numpy.max",
"pandas.DataFrame",
"numpy.where",
"tensorflow.summary.scalar",
"numpy.random.randint",
"tensorflow.keras.layers.Dropout",
"tensorflow.random_uniform_initializer",
"numpy.clip",
"numpy.unique",
"numpy.save",
"numpy.sin",
"tensorflow.stop_gradient",
"tensorflow.math.reduce_sum",
"numpy.argmax",
"numpy.load",
"numpy.zeros",
"numpy.logical_not",
"tensorflow.matmul",
"tensorflow.gather_nd",
"numpy.min",
"numpy.amin",
"numpy.random.choice",
"numpy.delete",
"numpy.random.rand",
"numpy.array",
"numpy.logical_and",
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"numpy.abs",
"tensorflow.constant",
"tensorflow.initializers.lecun_normal",
"tensorflow.reshape",
"tensorflow.ones_like",
"numpy.cos",
"tensorflow.expand_dims",
"numpy.random.shuffle",
"tensorflow.constant_initializer",
"tensorflow.math.maximum",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.optimizers.Adam",
"tensorflow.math.confusion_matrix",
"numpy.random.uniform",
"tensorflow.config.optimizer.set_jit",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
neblar/numpynn | [
"b33c5f671c8e835b55ed775ababa358e14c987bc"
] | [
"test/test_conv1dt_6.py"
] | [
"import torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom ..src.Conv1DT_6 import Conv1DT as NumpyConv1DT\n\n\nclass Tester:\n conv1dt_numpy = NumpyConv1DT()\n\n def y_torch(self, x, weight, bias, stride, padding):\n x = torch.tensor(x)\n weight = torch.tensor(weight)\n bias = torch.tensor(bias)\n return F.conv_transpose1d(x, weight, bias, stride, padding).numpy()\n\n def y_numpy(self, x, weight, bias, stride, padding):\n return self.conv1dt_numpy(x, weight, bias, stride, padding)\n\n def __call__(self, inchan, outchan, kernel_len, stride, padding):\n in_len = np.random.randint(7, 64)\n x = np.random.randn(inchan, in_len)\n W = np.random.randn(inchan, outchan, kernel_len)\n B = np.random.randn(outchan)\n y1 = self.y_torch(x[None], W, B, stride, padding)[0]\n y2 = self.y_numpy(x, W, B, stride, padding)\n print(y1.shape, y2.shape)\n assert np.allclose(y1, y2)\n\n\ndef test():\n tester = Tester()\n for _ in range(32):\n tester(1, 1, 1, 1, 0)\n tester(1, 1, 2, 1, 0)\n tester(1, 1, 3, 1, 0)\n tester(4, 1, 3, 1, 0)\n tester(1, 2, 3, 1, 0)\n tester(1, 1, 4, 1, 0)\n tester(1, 2, 5, 1, 0)\n tester(1, 2, 7, 1, 0)\n\n tester(1, 1, 1, 2, 0)\n tester(1, 1, 2, 2, 0)\n tester(1, 1, 4, 3, 0)\n tester(4, 8, 4, 3, 0)\n tester(1, 1, 1, 1, 1)\n tester(1, 1, 3, 1, 1)\n tester(1, 1, 3, 2, 1)\n tester(1, 1, 3, 2, 2)\n\n tester(512, 256, 3, 1, 1)\n tester(256, 256, 3, 1, 1)\n tester(80, 80, 3, 1, 1)\n tester(512, 128, 13, 1, 6)\n tester(128, 128, 11, 1, 5)\n tester(128, 128, 9, 1, 4)\n tester(128, 128, 7, 1, 3)\n tester(128, 128, 5, 1, 2)\n tester(128, 128, 3, 1, 1)\n tester(128, 1, 1, 1, 0)\n\n tester(64, 32, 4, 2, 1)\n tester(128, 64, 4, 2, 1)\n tester(256, 128, 16, 8, 4)\n tester(512, 256, 16, 8, 4)\n"
] | [
[
"numpy.allclose",
"torch.nn.functional.conv_transpose1d",
"torch.tensor",
"numpy.random.randn",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangby511/Extreme-Dark-Video-Enhancement | [
"e0de50428d74a7cec2ee87b63e9fce9860dfd590"
] | [
"state-of-the-art/bmvc18/psnr_ssim_mabd.py"
] | [
"import os, glob, time\n\nimport tensorflow as tf\nimport numpy as np\nfrom skvideo.io import vread, vwrite\n\n\ndirectory = 'test_set_results/'\n\nTEST_RESULT_DIR = './result_MBLLVEN_raw_he2he/test/'\n\nMAX_VAL = 255\n\nsess = tf.Session()\nt_vid1 = tf.placeholder(tf.uint8, [None, None, None, None])\nt_vid2 = tf.placeholder(tf.uint8, [None, None, None, None])\nt_psnr = tf.reduce_mean(tf.image.psnr(t_vid1, t_vid2, MAX_VAL))\nt_ssim = tf.reduce_mean(tf.image.ssim(t_vid1, t_vid2, MAX_VAL))\n\n\ndef get_psnr_ssim(sess, vid1, vid2):\n assert vid1.shape[0] == vid2.shape[0]\n psnr = 0\n ssim = 0\n N = 20\n for i in range(vid1.shape[0] / N):\n psnr += sess.run(t_psnr, feed_dict={t_vid1: vid1[i * N:(i + 1) * N], t_vid2: vid2[i * N:(i + 1) * N]})\n ssim += sess.run(t_ssim, feed_dict={t_vid1: vid1[i * N:(i + 1) * N], t_vid2: vid2[i * N:(i + 1) * N]})\n return psnr / vid1.shape[0] * N, ssim / vid1.shape[0] * N\n\n\ndef brightness(vid):\n R, G, B = vid[:, :, :, 0], vid[:, :, :, 1], vid[:, :, :, 2]\n return (0.2126 * R + 0.7152 * G + 0.0722 * B) # refer to https://en.wikipedia.org/wiki/Relative_luminance\n\n\ndef get_mse_mabd(vid1, vid2):\n b_vid1 = brightness(vid1)\n b_vid2 = brightness(vid2)\n mabd1 = abs(np.diff(b_vid1)).mean(axis=(1,2))\n mabd2 = abs(np.diff(b_vid2)).mean(axis=(1,2))\n return ((mabd1 - mabd2) ** 2).mean()\n\n\noutput_files = glob.glob(TEST_RESULT_DIR + '*')\ngt_files = [os.path.basename(file)[:-4] for file in output_files]\n\nif 'psnr_ssim_mabd' in os.listdir('.'):\n os.rename('psnr_ssim_mabd', 'psnr_ssim_mabd' + '_' + str(time.localtime().tm_mon).zfill(2) + str(time.localtime().tm_mday).zfill(2) + '-' + str(time.localtime().tm_hour).zfill(2) + str(time.localtime().tm_min).zfill(2))\n\nwith open('psnr_ssim_mabd', 'w') as f:\n pass\n\nall_psnr = 0\nall_ssim = 0\nall_mabd = 0\n\nfor output_file in output_files:\n out_vid = vread(output_file)\n gt_file = os.path.basename(output_file)[:-4] + '.npy'\n gt_vid = np.load('../../0_data/gt_he/' + gt_file)\n t0 = 
time.time()\n psnr, ssim = get_psnr_ssim(sess, out_vid, gt_vid)\n t1 = time.time()\n mabd = get_mse_mabd(out_vid, gt_vid)\n t2 = time.time()\n print('Done.\\t{}s\\t{}s'.format(t1 - t0, t2 - t1))\n with open('psnr_ssim_mabd', 'a') as f:\n f.write(os.path.basename(output_file)[:-4] + ' ' + str(psnr) + ' ' + str(ssim) + ' ' + str(mabd) + '\\n')\n all_psnr += psnr\n all_ssim += ssim\n all_mabd += mabd\n\nwith open('psnr_ssim_mabd', 'a') as f:\n f.write('\\n' * 3 + 'overall_average ' + str(all_psnr / len(gt_files)) + ' ' + str(all_ssim / len(gt_files)) + ' ' + str(all_mabd / len(gt_files)) + '\\n')\n"
] | [
[
"numpy.load",
"tensorflow.placeholder",
"numpy.diff",
"tensorflow.Session",
"tensorflow.image.ssim",
"tensorflow.image.psnr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
huyhoang17/DB_text_minimal | [
"0d1466889b21cb74a0571a0fb3856902739ea523"
] | [
"src/data_loaders.py"
] | [
"import os\nimport glob\nimport math\n\nimport hydra\nimport cv2\nimport numpy as np\nfrom shapely.geometry import Polygon\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport imgaug.augmenters as iaa\nimport pyclipper\n\nimport db_transforms\nfrom utils import dict_to_device, minmax_scaler_img\n\n\nclass BaseDatasetIter(Dataset):\n def __init__(self,\n train_dir,\n train_gt_dir,\n ignore_tags,\n is_training=True,\n image_size=640,\n min_text_size=8,\n shrink_ratio=0.4,\n thresh_min=0.3,\n thresh_max=0.7,\n augment=None,\n mean=[103.939, 116.779, 123.68],\n debug=False):\n\n self.train_dir = train_dir\n self.train_gt_dir = train_gt_dir\n self.ignore_tags = ignore_tags\n\n self.is_training = is_training\n self.image_size = image_size\n self.min_text_size = min_text_size\n self.shrink_ratio = shrink_ratio\n self.thresh_min = thresh_min\n self.thresh_max = thresh_max\n self.augment = augment\n if self.augment is None:\n self.augment = self._get_default_augment()\n\n self.mean = mean\n self.debug = debug\n\n # load metadata\n self.image_paths, self.gt_paths = self.load_metadata(\n train_dir, train_gt_dir)\n\n # load annotation\n self.all_anns = self.load_all_anns(self.gt_paths)\n assert len(self.image_paths) == len(self.all_anns)\n\n def _get_default_augment(self):\n augment_seq = iaa.Sequential([\n iaa.Fliplr(0.5),\n iaa.Affine(rotate=(-10, 10)),\n iaa.Resize((0.5, 3.0))\n ])\n return augment_seq\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, index):\n\n image_path = self.image_paths[index]\n anns = self.all_anns[index]\n\n if self.debug:\n print(image_path)\n print(len(anns))\n\n img = cv2.imread(image_path)[:, :, ::-1]\n if self.is_training and self.augment is not None:\n augment_seq = self.augment.to_deterministic()\n img, anns = db_transforms.transform(augment_seq, img, anns)\n img, anns = db_transforms.crop(img, anns)\n\n img, anns = db_transforms.resize(self.image_size, img, anns)\n\n anns = [ann for ann in 
anns if Polygon(ann['poly']).buffer(0).is_valid]\n gt = np.zeros((self.image_size, self.image_size),\n dtype=np.float32) # batch_gts\n mask = np.ones((self.image_size, self.image_size), dtype=np.float32)\n thresh_map = np.zeros((self.image_size, self.image_size),\n dtype=np.float32) # batch_thresh_maps\n # batch_thresh_masks\n thresh_mask = np.zeros((self.image_size, self.image_size),\n dtype=np.float32)\n\n if self.debug:\n print(type(anns), len(anns))\n\n ignore_tags = []\n for ann in anns:\n # i.e shape = (4, 2) / (6, 2) / ...\n poly = np.array(ann['poly'])\n height = max(poly[:, 1]) - min(poly[:, 1])\n width = max(poly[:, 0]) - min(poly[:, 0])\n polygon = Polygon(poly)\n\n # generate gt and mask\n if polygon.area < 1 or \\\n min(height, width) < self.min_text_size or \\\n ann['text'] in self.ignore_tags:\n ignore_tags.append(True)\n cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)\n continue\n else:\n # 6th equation\n distance = polygon.area * \\\n (1 - np.power(self.shrink_ratio, 2)) / polygon.length\n subject = [tuple(_l) for _l in ann['poly']]\n padding = pyclipper.PyclipperOffset()\n padding.AddPath(subject, pyclipper.JT_ROUND,\n pyclipper.ET_CLOSEDPOLYGON)\n shrinked = padding.Execute(-distance)\n\n if len(shrinked) == 0:\n ignore_tags.append(True)\n cv2.fillPoly(mask,\n poly.astype(np.int32)[np.newaxis, :, :], 0)\n continue\n else:\n shrinked = np.array(shrinked[0]).reshape(-1, 2)\n if shrinked.shape[0] > 2 and \\\n Polygon(shrinked).buffer(0).is_valid:\n ignore_tags.append(False)\n cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)\n else:\n ignore_tags.append(True)\n cv2.fillPoly(mask,\n poly.astype(np.int32)[np.newaxis, :, :],\n 0)\n continue\n\n # generate thresh map and thresh mask\n db_transforms.draw_thresh_map(ann['poly'],\n thresh_map,\n thresh_mask,\n shrink_ratio=self.shrink_ratio)\n\n thresh_map = thresh_map * \\\n (self.thresh_max - self.thresh_min) + self.thresh_min\n\n img = img.astype(np.float32)\n img[..., 0] -= self.mean[0]\n 
img[..., 1] -= self.mean[1]\n img[..., 2] -= self.mean[2]\n\n img = np.transpose(img, (2, 0, 1))\n\n data_return = {\n \"image_path\": image_path,\n \"img\": img,\n \"prob_map\": gt,\n \"supervision_mask\": mask,\n \"thresh_map\": thresh_map,\n \"text_area_map\": thresh_mask,\n }\n # for batch_size = 1\n if not self.is_training:\n data_return[\"anns\"] = [ann['poly'] for ann in anns]\n data_return[\"ignore_tags\"] = ignore_tags\n\n # return image_path, img, gt, mask, thresh_map, thresh_mask\n return data_return\n\n\nclass TotalTextDatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def load_metadata(self, img_dir, gt_dir):\n img_fps = sorted(glob.glob(os.path.join(img_dir, \"*\")))\n gt_fps = []\n for img_fp in img_fps:\n img_id = img_fp.split(\"/\")[-1].replace(\"img\", \"\").split(\".\")[0]\n gt_fn = \"gt_img{}.txt\".format(img_id)\n gt_fp = os.path.join(gt_dir, gt_fn)\n assert os.path.exists(img_fp)\n gt_fps.append(gt_fp)\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_paths):\n res = []\n for gt in gt_paths:\n lines = []\n reader = open(gt, 'r').readlines()\n for line in reader:\n item = {}\n parts = line.strip().split(',')\n label = parts[-1]\n line = [i.strip('\\ufeff').strip('\\xef\\xbb\\xbf') for i in parts]\n num_points = math.floor((len(line) - 1) / 2) * 2\n poly = np.array(list(map(float, line[:num_points]))).reshape(\n (-1, 2)).tolist()\n if len(poly) < 3:\n continue\n item['poly'] = poly\n item['text'] = label\n lines.append(item)\n res.append(lines)\n return res\n\n\nclass CTW1500DatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def load_metadata(self, img_dir, gt_dir):\n img_fps = sorted(glob.glob(os.path.join(img_dir, \"*\")))\n gt_fps = []\n for img_fp in img_fps:\n 
img_id = img_fp.split(\"/\")[-1][:-4]\n gt_fn = \"{}.txt\".format(img_id)\n gt_fp = os.path.join(gt_dir, gt_fn)\n assert os.path.exists(img_fp)\n gt_fps.append(gt_fp)\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_fps):\n \"\"\"\n Reference: https://github.com/whai362/PSENet/blob/master/dataset/ctw1500_loader.py\n \"\"\"\n res = []\n for gt_fp in gt_fps:\n lines = []\n with open(gt_fp, 'r') as f:\n for line in f:\n item = {}\n gt = line.strip().strip('\\ufeff').strip('\\xef\\xbb\\xbf')\n gt = list(map(int, gt.split(',')))\n\n x1 = np.int(gt[0])\n y1 = np.int(gt[1])\n bbox = [np.int(gt[i]) for i in range(4, 32)]\n bbox = np.asarray(bbox) + ([x1, y1] * 14)\n bbox = bbox.reshape(-1, 2).tolist()\n item['poly'] = bbox\n item['text'] = 'True'\n lines.append(item)\n res.append(lines)\n return res\n\n\nclass ICDAR2015DatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def load_metadata(self, img_dir, gt_dir):\n img_fps = glob.glob(os.path.join(img_dir, \"*\"))\n gt_fps = []\n for img_fp in img_fps:\n img_id = img_fp.split(\"/\")[-1].split(\".\")[0]\n gt_fn = \"gt_{}.txt\".format(img_id)\n gt_fp = os.path.join(gt_dir, gt_fn)\n assert os.path.exists(img_fp)\n gt_fps.append(gt_fp)\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_fps):\n res = []\n for gt_fp in gt_fps:\n lines = []\n with open(gt_fp, 'r') as f:\n for line in f:\n item = {}\n gt = line.strip().strip('\\ufeff').strip(\n '\\xef\\xbb\\xbf').split(\",\")\n label = \",\".join(gt[8:])\n poly = list(map(int, gt[:8]))\n poly = np.asarray(poly).reshape(-1, 2).tolist()\n item['poly'] = poly\n item['text'] = label\n lines.append(item)\n res.append(lines)\n return res\n\n\nclass MSRATD500DatasetIter(BaseDatasetIter):\n def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):\n 
super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)\n\n def transform_four_points(self, points, center_point, theta):\n \"\"\"Reference: https://stackoverflow.com/questions/622140\n \"\"\"\n theta = -theta\n new_coords = []\n x_center, y_center = center_point\n\n for point in points:\n x, y = point\n x_new = x_center + (x - x_center) * np.cos(theta) + \\\n (y - y_center) * np.sin(theta)\n y_new = y_center - (x - x_center) * np.sin(theta) + \\\n (y - y_center) * np.cos(theta)\n x_new = int(x_new)\n y_new = int(y_new)\n new_coords.append((x_new, y_new))\n return new_coords\n\n def load_metadata(self, img_dir, gt_dir=None):\n # ignore gt_dir\n img_fps = sorted(glob.glob(os.path.join(img_dir, \"*.JPG\")))\n gt_fps = sorted(glob.glob(os.path.join(img_dir, \"*.gt\")))\n assert len(img_fps) == len(gt_fps)\n\n return img_fps, gt_fps\n\n def load_all_anns(self, gt_fps):\n res = []\n for gt_fp in gt_fps:\n lines = []\n with open(gt_fp, 'r') as f:\n for line in f:\n item = {}\n line = list(map(float, line.strip().split()))\n index, dif, x_min, y_min, w, h, theta = line\n if int(dif) == 1: # difficult label\n continue\n\n c1 = (x_min, y_min)\n c2 = (x_min + w, y_min)\n c3 = (x_min + w, y_min + h)\n c4 = (x_min, y_min + h)\n center = (x_min + w / 2, y_min + h / 2)\n rot_box = self.transform_four_points([c1, c2, c3, c4],\n center, theta)\n rot_box = np.array(rot_box).tolist()\n\n item['poly'] = rot_box\n item['text'] = 'True'\n lines.append(item)\n res.append(lines)\n return res\n\n\[email protected](config_path=\"../config.yaml\", strict=False)\ndef run(cfg):\n dataset_name = cfg.dataset.name\n ignore_tags = cfg.data[dataset_name].ignore_tags\n train_dir = cfg.data[dataset_name].train_dir\n train_gt_dir = cfg.data[dataset_name].train_gt_dir\n\n if dataset_name == 'totaltext':\n TextDatasetIter = TotalTextDatasetIter\n elif dataset_name == 'ctw1500':\n TextDatasetIter = CTW1500DatasetIter\n elif dataset_name == 'icdar2015':\n TextDatasetIter = ICDAR2015DatasetIter\n 
elif dataset_name == 'msra_td500':\n TextDatasetIter = MSRATD500DatasetIter\n else:\n raise NotImplementedError(\"Pls provide valid dataset name!\")\n train_iter = TextDatasetIter(train_dir,\n train_gt_dir,\n ignore_tags,\n is_training=True,\n debug=False)\n train_loader = DataLoader(dataset=train_iter,\n batch_size=1,\n shuffle=True,\n num_workers=1)\n samples = next(iter(train_loader))\n samples = dict_to_device(samples, device='cpu')\n for k, v in samples.items():\n if isinstance(v, torch.Tensor):\n print(samples[k].device)\n import matplotlib.pyplot as plt\n plt.figure()\n plt.imshow(minmax_scaler_img(samples['img'][0].numpy().transpose(1, 2, 0)))\n plt.imshow(samples['prob_map'][0], cmap='jet', alpha=0.35)\n plt.imshow(samples['thresh_map'][0], cmap='jet', alpha=0.5)\n # plt.imshow(samples['text_area_map'][0], cmap='jet', alpha=0.5)\n # plt.imshow(samples['supervision_mask'][0], cmap='jet', alpha=0.5)\n plt.savefig(os.path.join(cfg.meta.root_dir, 'tmp/foo.jpg'),\n bbox_inches='tight')\n\n\nif __name__ == '__main__':\n run()\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.power",
"numpy.asarray",
"torch.utils.data.DataLoader",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.int",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ghmole/akshare | [
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6",
"eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6"
] | [
"akshare/option/option_commodity.py",
"akshare/wdbank/api.py",
"akshare/utils/token_process.py",
"akshare/stock_fundamental/stock_hold.py",
"akshare/futures/futures_sgx_daily.py",
"akshare/index/index_google.py",
"akshare/stock_feature/stock_board_ths.py",
"akshare/bond/bond_china_money.py"
] | [
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/1/14 20:50\nDesc: 商品期权数据\n说明:\n(1) 价格:自2019年12月02日起,纤维板报价单位由元/张改为元/立方米\n(2) 价格:元/吨,鸡蛋为元/500千克,纤维板为元/立方米,胶合板为元/张\n(3) 成交量、持仓量:手(按双边计算)\n(4) 成交额:万元(按双边计算)\n(5) 涨跌=收盘价-前结算价\n(6) 涨跌1=今结算价-前结算价\n(7) 合约系列:具有相同月份标的期货合约的所有期权合约的统称\n(8) 隐含波动率:根据期权市场价格,利用期权定价模型计算的标的期货合约价格波动率\n\"\"\"\nimport datetime\nimport warnings\nfrom io import StringIO, BytesIO\n\nimport requests\nimport pandas as pd\n\nfrom akshare.option.cons import (\n get_calendar,\n convert_date,\n DCE_DAILY_OPTION_URL,\n SHFE_OPTION_URL,\n CZCE_DAILY_OPTION_URL_3,\n SHFE_HEADERS,\n)\n\n\ndef get_dce_option_daily(trade_date=\"20200817\", symbol=\"聚丙烯期权\"):\n \"\"\"\n 大连商品交易所-期权-日频行情数据\n :param trade_date: str format:\"20191017\"\n :param symbol: str \"玉米期权\" or \"豆粕期权\" or \"铁矿石期权\", or \"液化石油气期权\" or \"聚乙烯期权\" or \"聚氯乙烯期权\" or \"聚丙烯期权\"\n :return: pandas.DataFrame\n part-1:\n 商品名称 合约名称 开盘价 最高价 最低价 收盘价 前结算价 结算价 涨跌 涨跌1 \\\n 0 玉米 c2001-C-1680 168.5 168.5 168.5 168.5 168.0 167.5 0.5 -0.5\n 1 玉米 c2001-C-1700 0 0.0 0.0 148.0 148.0 148.0 0.0 0.0\n 2 玉米 c2001-C-1720 0 0.0 0.0 129.0 128.0 129.0 1.0 1.0\n 3 玉米 c2001-C-1740 115 115.0 115.0 115.0 108.0 111.0 7.0 3.0\n 4 玉米 c2001-C-1760 89 95.5 89.0 95.5 89.0 93.5 6.5 4.5\n .. ... ... ... ... ... ... ... ... ... ...\n 239 玉米 c2009-P-2040 0 0.0 0.0 91.0 88.5 91.0 2.5 2.5\n 240 玉米 c2009-P-2060 0 0.0 0.0 106.0 104.0 106.0 2.0 2.0\n 241 玉米 c2009-P-2080 0 0.0 0.0 121.5 120.5 121.5 1.0 1.0\n 242 玉米 c2009-P-2100 0 0.0 0.0 138.5 137.5 138.5 1.0 1.0\n 243 玉米 c2009-P-2120 0 0.0 0.0 155.5 155.5 155.5 0.0 0.0\n Delta 成交量 持仓量 持仓量变化 成交额 行权量\n 0 0.98 2 236 0 0.34 0.0\n 1 0.96 0 236 0 0 0.0\n 2 0.94 0 210 0 0 0.0\n 3 0.90 20 1,040 0 2.3 0.0\n 4 0.85 12 680 0 1.11 0.0\n .. ... .. ... ... ... 
...\n 239 -0.70 0 30 0 0 0.0\n 240 -0.75 0 50 0 0 0.0\n 241 -0.80 0 20 0 0 0.0\n 242 -0.84 0 10 0 0 0.0\n 243 -0.88 0 0 0 0 0.0\n\n part-2:\n 0 合约系列 隐含波动率(%)\n 1 c2001 12.95\n 2 c2003 8.74\n 3 c2005 8.75\n 4 c2007 7.7\n 5 c2009 6.85\n \"\"\"\n calendar = get_calendar()\n day = convert_date(trade_date) if trade_date is not None else datetime.date.today()\n if day.strftime(\"%Y%m%d\") not in calendar:\n warnings.warn(\"%s非交易日\" % day.strftime(\"%Y%m%d\"))\n return None\n url = DCE_DAILY_OPTION_URL\n payload = {\n \"dayQuotes.variety\": \"all\",\n \"dayQuotes.trade_type\": \"1\",\n \"year\": str(day.year),\n \"month\": str(day.month - 1),\n \"day\": str(day.day),\n \"exportFlag\": \"excel\",\n }\n res = requests.post(url, data=payload)\n table_df = pd.read_excel(BytesIO(res.content), header=0)\n another_df = table_df.iloc[\n table_df[table_df.iloc[:, 0].str.contains(\"合约\")].iloc[-1].name:, [0, 1]\n ]\n another_df.reset_index(inplace=True, drop=True)\n another_df.iloc[0] = another_df.iat[0, 0].split(\"\\t\")\n another_df.columns = another_df.iloc[0]\n another_df = another_df.iloc[1:, :]\n if symbol == \"豆粕期权\":\n return table_df[table_df[\"商品名称\"] == \"豆粕\"], another_df[another_df.iloc[:, 0].str.contains(\"m\")]\n elif symbol == \"玉米期权\":\n return table_df[table_df[\"商品名称\"] == \"玉米\"], another_df[another_df.iloc[:, 0].str.contains(\"c\")]\n elif symbol == \"铁矿石期权\":\n return table_df[table_df[\"商品名称\"] == \"铁矿石\"], another_df[another_df.iloc[:, 0].str.contains(\"i\")]\n elif symbol == \"液化石油气期权\":\n return table_df[table_df[\"商品名称\"] == \"液化石油气\"], another_df[another_df.iloc[:, 0].str.contains(\"pg\")]\n elif symbol == \"聚乙烯期权\":\n return table_df[table_df[\"商品名称\"] == \"聚乙烯\"], another_df[another_df.iloc[:, 0].str.contains(\"i\")]\n elif symbol == \"聚氯乙烯期权\":\n return table_df[table_df[\"商品名称\"] == \"聚氯乙烯\"], another_df[another_df.iloc[:, 0].str.contains(\"v\")]\n elif symbol == \"聚丙烯期权\":\n return table_df[table_df[\"商品名称\"] == \"聚丙烯\"], 
another_df[another_df.iloc[:, 0].str.contains(\"pp\")]\n\n\ndef get_czce_option_daily(trade_date=\"20191017\", symbol=\"白糖期权\"):\n \"\"\"\n 郑州商品交易所-期权-日频行情数据\n 说明:\n (1) 价格:元/吨\n (2) 成交量、空盘量:手\n (3) 成交额:万元\n (4) 涨跌一:今收盘-昨结算\n (5) 涨跌二:今结算-昨结算\n (6) 隐含波动率:将当日期权合约的结算价代入期权定价模型,反推出来的波动率数值\n :param trade_date: str \"20191017\"\n :param symbol: str \"白糖期权\", \"棉花期权\", \"甲醇期权\", \"PTA期权\", \"菜籽粕期权\"\n :return: pandas.DataFrame\n 郑商所每日期权交易数据\n 品种代码 昨结算 今开盘 最高价 最低价 今收盘 \\\n 0 CF001C10800 1,579.00 0.00 0.00 0.00 0.00\n 1 CF001C11000 1,392.00 0.00 0.00 0.00 0.00\n 2 CF001C11200 1,211.00 0.00 0.00 0.00 0.00\n 3 CF001C11400 1,038.00 1,396.00 1,396.00 1,396.00 1,396.00\n 4 CF001C11600 874.00 0.00 0.00 0.00 0.00\n .. ... ... ... ... ... ...\n 398 SR009P5900 576.00 0.00 0.00 0.00 0.00\n 399 SR009P6000 653.00 0.00 0.00 0.00 0.00\n 400 小计\n 401 SR合计\n 402 总计\n 今结算 涨跌1 涨跌2 成交量(手) 空盘量 增减量 \\\n 0 1,866.00 287.00 287.00 0 0 0\n 1 1,672.00 280.00 280.00 0 0 0\n 2 1,481.00 270.00 270.00 0 4 0\n 3 1,295.00 358.00 257.00 2 68 0\n 4 1,114.00 240.00 240.00 0 224 0\n .. ... ... ... ... ... ...\n 398 580.00 4.00 4.00 0 0 0\n 399 658.00 5.00 5.00 0 0 0\n 400 656 860 400\n 401 32,098 276,900 2252\n 402 110,664 474,154 14770\n 成交额(万元) DELTA 隐含波动率 行权量\n 0 0.00 0.9765 22.29 0\n 1 0.00 0.9621 21.84 0\n 2 0.00 0.9423 21.38 0\n 3 1.40 0.9155 20.91 0\n 4 0.00 0.8800 20.45 0\n .. ... ... ... 
...\n 398 0.00 -0.6639 16.24 0\n 399 0.00 -0.7007 16.58 0\n 400 97.28 0\n 401 2138.41 0\n 402 8769.52 2\n \"\"\"\n calendar = get_calendar()\n day = convert_date(trade_date) if trade_date is not None else datetime.date.today()\n if day.strftime(\"%Y%m%d\") not in calendar:\n warnings.warn(\"{}非交易日\".format(day.strftime(\"%Y%m%d\")))\n return None\n if day > datetime.date(2010, 8, 24):\n url = CZCE_DAILY_OPTION_URL_3.format(day.strftime(\"%Y\"), day.strftime(\"%Y%m%d\"))\n try:\n r = requests.get(url)\n f = StringIO(r.text)\n table_df = pd.read_table(f, encoding=\"utf-8\", skiprows=1, sep=\"|\")\n if symbol == \"白糖期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"SR\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"PTA期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"TA\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"甲醇期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"MA\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"菜籽粕期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"RM\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n elif symbol == \"动力煤期权\":\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"ZC\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n else:\n temp_df = table_df[table_df.iloc[:, 0].str.contains(\"CF\")]\n temp_df.reset_index(inplace=True, drop=True)\n return temp_df.iloc[:-1, :]\n except:\n return None\n\n\ndef get_shfe_option_daily(trade_date=\"20200827\", symbol=\"铝期权\"):\n \"\"\"\n 上海期货交易所-期权-日频行情数据\n :param trade_date: str \"20191017\"\n :param symbol: str \"铜期权\" or \"天胶期权\" or \"黄金期权\" or \"铝期权\" or \"锌期权\"\n :return: tuple(pandas.DataFrame)\n \"\"\"\n calendar = get_calendar()\n day = convert_date(trade_date) if trade_date is not None else datetime.date.today()\n if 
day.strftime(\"%Y%m%d\") not in calendar:\n warnings.warn(\"%s非交易日\" % day.strftime(\"%Y%m%d\"))\n return None\n if day > datetime.date(2010, 8, 24):\n url = SHFE_OPTION_URL.format(day.strftime(\"%Y%m%d\"))\n try:\n r = requests.get(url, headers=SHFE_HEADERS)\n json_data = r.json()\n table_df = pd.DataFrame(\n [\n row\n for row in json_data[\"o_curinstrument\"]\n if row[\"INSTRUMENTID\"] not in [\"小计\", \"合计\"]\n and row[\"INSTRUMENTID\"] != \"\"\n ]\n )\n contract_df = table_df[table_df[\"PRODUCTNAME\"].str.strip() == symbol]\n product_df = pd.DataFrame(json_data[\"o_curproduct\"])\n product_df = product_df[product_df[\"PRODUCTNAME\"].str.strip() == symbol]\n volatility_df = pd.DataFrame(json_data[\"o_cursigma\"])\n volatility_df = volatility_df[\n volatility_df[\"PRODUCTNAME\"].str.strip() == symbol\n ]\n contract_df.columns = [\n \"_\",\n \"_\",\n \"_\",\n \"合约代码\",\n \"前结算价\",\n \"开盘价\",\n \"最高价\",\n \"最低价\",\n \"收盘价\",\n \"结算价\",\n \"涨跌1\",\n \"涨跌2\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"_\",\n \"行权量\",\n \"成交额\",\n \"德尔塔\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ]\n contract_df = contract_df[[\n \"合约代码\",\n \"开盘价\",\n \"最高价\",\n \"最低价\",\n \"收盘价\",\n \"前结算价\",\n \"结算价\",\n \"涨跌1\",\n \"涨跌2\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"成交额\",\n \"德尔塔\",\n \"行权量\",\n ]]\n\n volatility_df.columns = [\n \"_\",\n \"_\",\n \"_\",\n \"合约系列\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"行权量\",\n \"成交额\",\n \"隐含波动率\",\n \"_\",\n ]\n\n volatility_df = volatility_df[[\n \"合约系列\",\n \"成交量\",\n \"持仓量\",\n \"持仓量变化\",\n \"成交额\",\n \"行权量\",\n \"隐含波动率\",\n ]]\n return contract_df, volatility_df\n except:\n return None\n\n\nif __name__ == \"__main__\":\n get_czce_option_daily_df = get_czce_option_daily(trade_date=\"20200817\", symbol=\"动力煤期权\")\n print(get_czce_option_daily_df)\n get_dce_option_daily_one, get_dce_option_daily_two = get_dce_option_daily(trade_date=\"20210113\", symbol=\"玉米期权\")\n print(get_dce_option_daily_one)\n print(get_dce_option_daily_two)\n 
get_shfe_option_daily_one, get_shfe_option_daily_two = get_shfe_option_daily(trade_date=\"20210312\", symbol=\"天胶期权\")\n print(get_shfe_option_daily_one)\n print(get_shfe_option_daily_two)\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2020/3/17 22:11\nDesc: world bank interface\n\"\"\"\nimport collections\nimport datetime\nimport re\nimport warnings\n\nimport pandas as pd\nimport tabulate\nfrom decorator import decorator\n\nfrom . import fetcher\n\nBASE_URL = \"https://api.worldbank.org/v2\"\nCOUNTRIES_URL = f\"{BASE_URL}/countries\"\nILEVEL_URL = f\"{BASE_URL}/incomeLevels\"\nINDICATOR_URL = f\"{BASE_URL}/indicators\"\nLTYPE_URL = f\"{BASE_URL}/lendingTypes\"\nSOURCES_URL = f\"{BASE_URL}/sources\"\nTOPIC_URL = f\"{BASE_URL}/topics\"\nINDIC_ERROR = \"Cannot specify more than one of indicator, source, and topic\"\n\n\nclass WBSearchResult(list):\n \"\"\"\n A list that prints out a user-friendly table when printed or returned on the\n command line\n\n\n Items are expected to be dict-like and have an \"id\" key and a \"name\" or\n \"value\" key\n \"\"\"\n\n def __repr__(self):\n try:\n return tabulate.tabulate(\n [[o[\"id\"], o[\"name\"]] for o in self],\n headers=[\"id\", \"name\"],\n tablefmt=\"simple\",\n )\n except KeyError:\n return tabulate.tabulate(\n [[o[\"id\"], o[\"value\"]] for o in self],\n headers=[\"id\", \"value\"],\n tablefmt=\"simple\",\n )\n\n\nif pd:\n\n class WBSeries(pd.Series):\n \"\"\"\n A pandas Series with a last_updated attribute\n \"\"\"\n\n _metadata = [\"last_updated\"]\n\n @property\n def _constructor(self):\n return WBSeries\n\n class WBDataFrame(pd.DataFrame):\n \"\"\"\n A pandas DataFrame with a last_updated attribute\n \"\"\"\n\n _metadata = [\"last_updated\"]\n\n @property\n def _constructor(self):\n return WBDataFrame\n\n\n@decorator\ndef uses_pandas(f, *args, **kwargs):\n \"\"\"Raise ValueError if pandas is not loaded\"\"\"\n if not pd:\n raise ValueError(\"Pandas must be installed to be used\")\n return f(*args, **kwargs)\n\n\ndef parse_value_or_iterable(arg):\n \"\"\"\n If arg is a single value, return it as a string; if an iterable, return a\n ;-joined string of all values\n \"\"\"\n if str(arg) == 
arg:\n return arg\n if type(arg) == int:\n return str(arg)\n return \";\".join(arg)\n\n\ndef convert_year_to_datetime(yearstr):\n \"\"\"return datetime.datetime object from %Y formatted string\"\"\"\n return datetime.datetime.strptime(yearstr, \"%Y\")\n\n\ndef convert_month_to_datetime(monthstr):\n \"\"\"return datetime.datetime object from %YM%m formatted string\"\"\"\n split = monthstr.split(\"M\")\n return datetime.datetime(int(split[0]), int(split[1]), 1)\n\n\ndef convert_quarter_to_datetime(quarterstr):\n \"\"\"\n return datetime.datetime object from %YQ%# formatted string, where # is\n the desired quarter\n \"\"\"\n split = quarterstr.split(\"Q\")\n quarter = int(split[1])\n month = quarter * 3 - 2\n return datetime.datetime(int(split[0]), month, 1)\n\n\ndef convert_dates_to_datetime(data):\n \"\"\"\n Return a datetime.datetime object from a date string as provided by the\n World Bank\n \"\"\"\n first = data[0][\"date\"]\n if isinstance(first, datetime.datetime):\n return data\n if \"M\" in first:\n converter = convert_month_to_datetime\n elif \"Q\" in first:\n converter = convert_quarter_to_datetime\n else:\n converter = convert_year_to_datetime\n for datum in data:\n datum_date = datum[\"date\"]\n if \"MRV\" in datum_date:\n continue\n if \"-\" in datum_date:\n continue\n datum[\"date\"] = converter(datum_date)\n return data\n\n\ndef cast_float(value):\n \"\"\"\n Return a floated value or none\n \"\"\"\n try:\n return float(value)\n except (ValueError, TypeError):\n return None\n\n\ndef get_series(\n indicator,\n country=\"all\",\n data_date=None,\n source=None,\n convert_date=False,\n column_name=\"value\",\n keep_levels=False,\n cache=True,\n):\n \"\"\"\n Retrieve indicators for given countries and years\n\n :indicator: the desired indicator code\n :country: a country code, sequence of country codes, or \"all\" (default)\n :data_date: the desired date as a datetime object or a 2-tuple with start\n and end dates\n :source: the specific source to retrieve 
data from (defaults on API to 2,\n World Development Indicators)\n :convert_date: if True, convert date field to a datetime.datetime object.\n :column_name: the desired name for the pandas column\n :keep_levels: if True and pandas is True, don't reduce the number of index\n levels returned if only getting one date or country\n :cache: use the cache\n :returns: WBSeries\n \"\"\"\n raw_data = get_data(\n indicator=indicator,\n country=country,\n data_date=data_date,\n source=source,\n convert_date=convert_date,\n cache=cache,\n )\n df = pd.DataFrame(\n [[i[\"country\"][\"value\"], i[\"date\"], i[\"value\"]] for i in raw_data],\n columns=[\"country\", \"date\", column_name],\n )\n df[column_name] = df[column_name].map(cast_float)\n if not keep_levels and len(df[\"country\"].unique()) == 1:\n df = df.set_index(\"date\")\n elif not keep_levels and len(df[\"date\"].unique()) == 1:\n df = df.set_index(\"country\")\n else:\n df = df.set_index([\"country\", \"date\"])\n series = WBSeries(df[column_name])\n series.last_updated = raw_data.last_updated\n return series\n\n\ndef get_data(\n indicator,\n country=\"all\",\n data_date=None,\n source=None,\n convert_date=False,\n pandas=False,\n column_name=\"value\",\n keep_levels=False,\n cache=True,\n):\n \"\"\"\n Retrieve indicators for given countries and years\n\n :indicator: the desired indicator code\n :country: a country code, sequence of country codes, or \"all\" (default)\n :data_date: the desired date as a datetime object or a 2-tuple with start\n and end dates\n :source: the specific source to retrieve data from (defaults on API to 2,\n World Development Indicators)\n :convert_date: if True, convert date field to a datetime.datetime object.\n :cache: use the cache\n :returns: list of dictionaries or pandas Series\n \"\"\"\n if pandas:\n warnings.warn(\n (\n \"Argument 'pandas' is deprecated and will be removed in a \"\n \"future version. 
Use get_series or get_dataframe instead.\"\n ),\n PendingDeprecationWarning,\n )\n return get_series(\n indicator=indicator,\n country=country,\n data_date=data_date,\n source=source,\n convert_date=convert_date,\n column_name=column_name,\n keep_levels=keep_levels,\n cache=cache,\n )\n query_url = COUNTRIES_URL\n try:\n c_part = parse_value_or_iterable(country)\n except TypeError:\n raise TypeError(\"'country' must be a string or iterable'\")\n query_url = \"/\".join((query_url, c_part, \"indicators\", indicator))\n args = {}\n if data_date:\n if isinstance(data_date, collections.Sequence):\n data_date_str = \":\".join((i.strftime(\"%Y\") for i in data_date))\n args[\"date\"] = data_date_str\n else:\n args[\"date\"] = data_date.strftime(\"%Y\")\n if source:\n args[\"source\"] = source\n data = fetcher.fetch(query_url, args, cache=cache)\n if convert_date:\n data = convert_dates_to_datetime(data)\n return data\n\n\ndef id_only_query(query_url, query_id, cache):\n \"\"\"\n Retrieve information when ids are the only arguments\n\n :query_url: the base url to use for the query\n :query_id: an id or sequence of ids\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects describing results\n \"\"\"\n if query_id:\n query_url = \"/\".join((query_url, parse_value_or_iterable(query_id)))\n return WBSearchResult(fetcher.fetch(query_url))\n\n\ndef get_source(source_id=None, cache=True):\n \"\"\"\n Retrieve information on a source\n\n :source_id: a source id or sequence thereof. None returns all sources\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects describing selected\n sources\n \"\"\"\n return id_only_query(SOURCES_URL, source_id, cache=cache)\n\n\ndef get_incomelevel(level_id=None, cache=True):\n \"\"\"\n Retrieve information on an income level aggregate\n\n :level_id: a level id or sequence thereof. 
None returns all income level\n aggregates\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects describing selected\n income level aggregates\n \"\"\"\n return id_only_query(ILEVEL_URL, level_id, cache=cache)\n\n\ndef get_topic(topic_id=None, cache=True):\n \"\"\"\n Retrieve information on a topic\n\n :topic_id: a topic id or sequence thereof. None returns all topics\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects describing selected\n topic aggregates\n \"\"\"\n return id_only_query(TOPIC_URL, topic_id, cache=cache)\n\n\ndef get_lendingtype(type_id=None, cache=True):\n \"\"\"\n Retrieve information on an income level aggregate\n\n :level_id: lending type id or sequence thereof. None returns all lending\n type aggregates\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects describing selected\n topic aggregates\n \"\"\"\n return id_only_query(LTYPE_URL, type_id, cache=cache)\n\n\ndef get_country(country_id=None, incomelevel=None, lendingtype=None, cache=True):\n \"\"\"\n Retrieve information on a country or regional aggregate. Can specify\n either country_id, or the aggregates, but not both\n\n :country_id: a country id or sequence thereof. 
None returns all countries\n and aggregates.\n :incomelevel: desired incomelevel id or ids.\n :lendingtype: desired lendingtype id or ids.\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects representing each\n country\n \"\"\"\n if country_id:\n if incomelevel or lendingtype:\n raise ValueError(\"Can't specify country_id and aggregates\")\n return id_only_query(COUNTRIES_URL, country_id, cache=cache)\n args = {}\n if incomelevel:\n args[\"incomeLevel\"] = parse_value_or_iterable(incomelevel)\n if lendingtype:\n args[\"lendingType\"] = parse_value_or_iterable(lendingtype)\n return WBSearchResult(fetcher.fetch(COUNTRIES_URL, args, cache=cache))\n\n\ndef get_indicator(indicator=None, source=None, topic=None, cache=True):\n \"\"\"\n Retrieve information about an indicator or indicators. Only one of\n indicator, source, and topic can be specified. Specifying none of the\n three will return all indicators.\n\n :indicator: an indicator code or sequence thereof\n :source: a source id or sequence thereof\n :topic: a topic id or sequence thereof\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects representing\n indicators\n \"\"\"\n if indicator:\n if source or topic:\n raise ValueError(INDIC_ERROR)\n query_url = \"/\".join((INDICATOR_URL, parse_value_or_iterable(indicator)))\n elif source:\n if topic:\n raise ValueError(INDIC_ERROR)\n query_url = \"/\".join(\n (SOURCES_URL, parse_value_or_iterable(source), \"indicators\")\n )\n elif topic:\n query_url = \"/\".join((TOPIC_URL, parse_value_or_iterable(topic), \"indicators\"))\n else:\n query_url = INDICATOR_URL\n return WBSearchResult(fetcher.fetch(query_url, cache=cache))\n\n\ndef search_indicators(query, source=None, topic=None, cache=True):\n \"\"\"\n Search indicators for a certain regular expression. Only one of source or\n topic can be specified. 
In interactive mode, will return None and print ids\n and names unless suppress_printing is True.\n\n :query: the term to match against indicator names\n :source: if present, id of desired source\n :topic: if present, id of desired topic\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects representing search\n indicators\n \"\"\"\n indicators = get_indicator(source=source, topic=topic, cache=cache)\n pattern = re.compile(query, re.IGNORECASE)\n return WBSearchResult(i for i in indicators if pattern.search(i[\"name\"]))\n\n\ndef search_countries(query, incomelevel=None, lendingtype=None, cache=True):\n \"\"\"\n Search countries by name. Very simple search.\n\n :query: the string to match against country names\n :incomelevel: if present, search only the matching incomelevel\n :lendingtype: if present, search only the matching lendingtype\n :cache: use the cache\n :returns: WBSearchResult containing dictionary objects representing\n countries\n \"\"\"\n countries = get_country(\n incomelevel=incomelevel, lendingtype=lendingtype, cache=cache\n )\n pattern = re.compile(query, re.IGNORECASE)\n return WBSearchResult(i for i in countries if pattern.search(i[\"name\"]))\n\n\n@uses_pandas\ndef get_dataframe(\n indicators,\n country=\"all\",\n data_date=None,\n source=None,\n convert_date=False,\n keep_levels=False,\n cache=True,\n):\n \"\"\"\n Convenience function to download a set of indicators and merge them into a\n pandas DataFrame. 
The index will be the same as if calls were made to\n get_data separately.\n\n :indicators: An dictionary where the keys are desired indicators and the\n values are the desired column names\n :country: a country code, sequence of country codes, or \"all\" (default)\n :data_date: the desired date as a datetime object or a 2-sequence with\n start and end dates\n :source: the specific source to retrieve data from (defaults on API to 2,\n World Development Indicators)\n :convert_date: if True, convert date field to a datetime.datetime object.\n :keep_levels: if True don't reduce the number of index levels returned if\n only getting one date or country\n :cache: use the cache\n :returns: a pandas DataFrame\n \"\"\"\n serieses = [\n (\n get_series(\n indicator=indicator,\n country=country,\n data_date=data_date,\n source=source,\n convert_date=convert_date,\n keep_levels=keep_levels,\n cache=cache,\n ).rename(name)\n )\n for indicator, name in indicators.items()\n ]\n print(type(serieses[0]))\n result = None\n for series in serieses:\n if result is None:\n result = series.to_frame()\n else:\n result = result.join(series.to_frame(), how=\"outer\")\n result = WBDataFrame(result)\n result.last_updated = {i.name: i.last_updated for i in serieses}\n return result\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2020/2/13 21:22\nDesc: 存储和读取Token文件\n\"\"\"\nimport os\n\nimport pandas as pd\n\nfrom akshare.pro import cons\n\n\ndef set_token(token):\n df = pd.DataFrame([token], columns=['token'])\n user_home = os.path.expanduser('~')\n fp = os.path.join(user_home, cons.TOKEN_F_P)\n df.to_csv(fp, index=False)\n\n\ndef get_token():\n user_home = os.path.expanduser('~')\n fp = os.path.join(user_home, cons.TOKEN_F_P)\n if os.path.exists(fp):\n df = pd.read_csv(fp)\n return str(df.iloc[0]['token'])\n else:\n print(cons.TOKEN_ERR_MSG)\n return None\n\n\nif __name__ == '__main__':\n pass\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2020/5/27 19:57\nDesc: 新浪财经-股票-机构持股\nhttp://vip.stock.finance.sina.com.cn/q/go.php/vComStockHold/kind/jgcg/index.phtml\n\"\"\"\nimport demjson\nimport pandas as pd\nimport requests\n\n\ndef stock_institute_hold(quarter: str = \"20051\") -> pd.DataFrame:\n \"\"\"\n 新浪财经-股票-机构持股一览表\n http://vip.stock.finance.sina.com.cn/q/go.php/vComStockHold/kind/jgcg/index.phtml\n :param quarter: 从 2005 年开始, {\"一季报\":1, \"中报\":2 \"三季报\":3 \"年报\":4}, e.g., \"20191\", 其中的 1 表示一季报; \"20193\", 其中的 3 表示三季报;\n :type quarter: str\n :return: 机构持股一览表\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://vip.stock.finance.sina.com.cn/q/go.php/vComStockHold/kind/jgcg/index.phtml?symbol=%D6%A4%C8%AF%BC%F2%B3%C6%BB%F2%B4%FA%C2%EB\"\n params = {\n \"p\": \"1\",\n \"num\": \"5000\",\n \"reportdate\": quarter[:-1],\n \"quarter\": quarter[-1],\n }\n r = requests.get(url, params=params)\n temp_df = pd.read_html(r.text)[0]\n temp_df[\"证券代码\"] = temp_df[\"证券代码\"].astype(str).str.zfill(6)\n del temp_df[\"明细\"]\n temp_df.columns = ['证券代码', '证券简称', '机构数', '机构数变化', '持股比例', '持股比例增幅', '占流通股比例', '占流通股比例增幅']\n return temp_df\n\n\ndef stock_institute_hold_detail(stock: str = \"600433\", quarter: str = \"20201\") -> pd.DataFrame:\n \"\"\"\n 新浪财经-股票-机构持股详情\n http://vip.stock.finance.sina.com.cn/q/go.php/vComStockHold/kind/jgcg/index.phtml\n :param stock: 股票代码\n :type stock: str\n :param quarter: 从 2005 年开始, {\"一季报\":1, \"中报\":2 \"三季报\":3 \"年报\":4}, e.g., \"20191\", 其中的 1 表示一季报; \"20193\", 其中的 3 表示三季报;\n :type quarter: str\n :return: 指定股票和财报时间的机构持股数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://vip.stock.finance.sina.com.cn/q/api/jsonp.php/var%20details=/ComStockHoldService.getJGCGDetail\"\n params = {\n \"symbol\": stock,\n \"quarter\": quarter,\n }\n r = requests.get(url, params=params)\n text_data = r.text\n json_data = demjson.decode(text_data[text_data.find(\"{\"):-2])\n big_df = pd.DataFrame()\n for item in json_data[\"data\"].keys():\n 
inner_temp_df = pd.DataFrame(json_data[\"data\"][item]).T.iloc[:-1, :]\n inner_temp_df.reset_index(inplace=True)\n big_df = big_df.append(inner_temp_df, ignore_index=True)\n if not big_df.empty:\n big_df[\"index\"] = big_df[\"index\"].str.split(\"_\", expand=True)[0]\n big_df.rename(columns={\"index\": \"institute\"}, inplace=True)\n big_df = big_df.iloc[:, :12]\n big_df.columns = [\"持股机构类型\",\n \"持股机构代码\",\n \"持股机构简称\",\n \"持股机构全称\",\n \"持股数\",\n \"最新持股数\",\n \"持股比例\",\n \"最新持股比例\",\n \"占流通股比例\",\n \"最新占流通股比例\",\n \"持股比例增幅\",\n \"占流通股比例增幅\",\n ]\n big_df[\"持股机构类型\"] = big_df[\"持股机构类型\"].str.replace(\"fund\", \"基金\")\n big_df[\"持股机构类型\"] = big_df[\"持股机构类型\"].str.replace(\"socialSecurity\", \"全国社保\")\n big_df[\"持股机构类型\"] = big_df[\"持股机构类型\"].str.replace(\"qfii\", \"QFII\")\n return big_df\n else:\n return None\n\n\nif __name__ == '__main__':\n stock_institute_hold_df = stock_institute_hold(quarter=\"20201\")\n print(stock_institute_hold_df)\n\n stock_institute_hold_detail_df = stock_institute_hold_detail(stock=\"300003\", quarter=\"20201\")\n print(stock_institute_hold_detail_df)\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2020/3/14 0:25\nDesc: Futures data from Singapore Exchange\nhttps://www.sgx.com/zh-hans/research-education/derivatives\nhttps://links.sgx.com/1.0.0/derivatives-daily/5888/FUTURE.zip\n\"\"\"\nimport zipfile\nfrom io import BytesIO\nfrom io import StringIO\n\nimport pandas as pd\nimport requests\nfrom tqdm import tqdm\n\nfrom akshare.index.index_investing import index_investing_global\n\n\ndef futures_sgx_daily(trade_date: str = \"2020/03/06\", recent_day: str = \"3\") -> pd.DataFrame:\n \"\"\"\n Futures daily data from sgx\n P.S. it will be slowly if you do not use VPN\n :param trade_date: it means the specific trade day you want to fetch\n :type trade_date: str e.g., \"2020/03/06\"\n :param recent_day: the data range near the specific trade day\n :type recent_day: str e.g. \"3\" means 3 day before specific trade day\n :return: data contains from (trade_date - recent_day) to trade_day\n :rtype: pandas.DataFrame\n \"\"\"\n big_df = pd.DataFrame()\n index_df = index_investing_global(country=\"新加坡\", index_name=\"FTSE Singapore\", start_date=\"2020/01/01\", end_date=trade_date)\n index_df.sort_index(inplace=True)\n index_df.reset_index(inplace=True)\n index_df.reset_index(inplace=True)\n index_df.index = index_df[\"index\"] + 5840\n date_start = index_df.index[-1] + 1 - int(recent_day)\n date_end = index_df.index[-1] + 1\n for page in tqdm(range(date_start, date_end)):\n # page = 5883\n url = f\"https://links.sgx.com/1.0.0/derivatives-daily/{page}/FUTURE.zip\"\n r = requests.get(url)\n with zipfile.ZipFile(BytesIO(r.content)) as file:\n with file.open(file.namelist()[0]) as my_file:\n data = my_file.read().decode()\n if file.namelist()[0].endswith(\"txt\"):\n data_df = pd.read_table(StringIO(data))\n else:\n data_df = pd.read_csv(StringIO(data))\n big_df = big_df.append(data_df)\n return big_df\n\n\nif __name__ == '__main__':\n futures_sgx_daily_df = futures_sgx_daily(trade_date=\"2021/01/23\", 
recent_day=\"2\")\n print(futures_sgx_daily_df)\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2019/12/4 15:49\nDesc: 获取谷歌指数, 必须使用代理, 获得的数据是小时频率的, 所以获取时间周期太长会很慢\n\"\"\"\nfrom akshare.index.request import TrendReq\nimport matplotlib.pyplot as plt\n\n\ndef google_index(\n word=\"python\", start_date=\"2019-12-01\", end_date=\"2019-12-04\", plot=True\n):\n \"\"\"\n 返回指定区间的谷歌指数\n \"\"\"\n pytrends = TrendReq(hl=\"en-US\", tz=360)\n kw_list = [word]\n pytrends.build_payload(\n kw_list, cat=0, timeframe=start_date + \" \" + end_date, geo=\"\", gprop=\"\"\n )\n search_df = pytrends.interest_over_time()\n if plot:\n search_df[word].plot()\n plt.legend()\n plt.show()\n return search_df[word]\n return search_df[word]\n\n\nif __name__ == \"__main__\":\n google_index_df = google_index(\n word=\"AI\", start_date=\"2019-12-10T10\", end_date=\"2019-12-10T23\", plot=True\n )\n print(google_index_df)\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/3/18 16:48\nDesc: 同花顺-板块-概念板块-成份股\nhttp://q.10jqka.com.cn/gn/detail/code/301558/\n\"\"\"\nimport os\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom py_mini_racer import py_mini_racer\nfrom tqdm import tqdm\n\n\ndef _get_js_path_ths(name: str = None, module_file: str = None) -> str:\n \"\"\"\n 获取 JS 文件的路径(从模块所在目录查找)\n :param name: 文件名\n :type name: str\n :param module_file: 模块路径\n :type module_file: str\n :return: 路径\n :rtype: str\n \"\"\"\n module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))\n module_json_path = os.path.join(module_folder, \"stock_feature\", name)\n return module_json_path\n\n\ndef _get_file_content_ths(file_name: str = \"ase.min.js\") -> str:\n \"\"\"\n 获取 JS 文件的内容\n :param file_name: JS 文件名\n :type file_name: str\n :return: 文件内容\n :rtype: str\n \"\"\"\n setting_file_name = file_name\n setting_file_path = _get_js_path_ths(setting_file_name, __file__)\n with open(setting_file_path) as f:\n file_data = f.read()\n return file_data\n\n\ndef stock_board_concept_name_ths() -> pd.DataFrame:\n \"\"\"\n 同花顺-板块-概念板块-概念\n http://q.10jqka.com.cn/gn/detail/code/301558/\n :return: 所有概念板块的名称和链接\n :rtype: pandas.DataFrame\n \"\"\"\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'\n }\n url = 'http://q.10jqka.com.cn/gn/'\n r = requests.get(url, headers=headers)\n soup = BeautifulSoup(r.text, \"lxml\")\n html_list = soup.find('div', attrs={'class': 'boxShadow'}).find_all('a', attrs={'target': '_blank'})\n name_list = [item.text for item in html_list]\n url_list = [item['href'] for item in html_list]\n temp_df = pd.DataFrame([name_list, url_list], index=['name', 'url']).T\n return temp_df\n\n\ndef stock_board_concept_cons_ths(symbol: str = \"阿里巴巴概念\") -> pd.DataFrame:\n \"\"\"\n 同花顺-板块-概念板块-成份股\n http://q.10jqka.com.cn/gn/detail/code/301558/\n :param 
symbol: 板块名称\n :type symbol: str\n :return: 成份股\n :rtype: pandas.DataFrame\n \"\"\"\n stock_board_ths_map_df = stock_board_concept_name_ths()\n symbol = stock_board_ths_map_df[stock_board_ths_map_df['name'] == symbol]['url'].values[0].split('/')[-2]\n js_code = py_mini_racer.MiniRacer()\n js_content = _get_file_content_ths(\"ths.js\")\n js_code.eval(js_content)\n v_code = js_code.call('v')\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',\n 'Cookie': f'v={v_code}'\n }\n url = f'http://q.10jqka.com.cn/gn/detail/field/264648/order/desc/page/1/ajax/1/code/{symbol}'\n r = requests.get(url, headers=headers)\n soup = BeautifulSoup(r.text, \"lxml\")\n page_num = int(soup.find_all('a', attrs={'class': 'changePage'})[-1]['page'])\n big_df = pd.DataFrame()\n for page in tqdm(range(1, page_num+1)):\n v_code = js_code.call('v')\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',\n 'Cookie': f'v={v_code}'\n }\n url = f'http://q.10jqka.com.cn/gn/detail/field/264648/order/desc/page/{page}/ajax/1/code/{symbol}'\n r = requests.get(url, headers=headers)\n temp_df = pd.read_html(r.text)[0]\n big_df = big_df.append(temp_df, ignore_index=True)\n big_df.rename({\"涨跌幅(%)\": \"涨跌幅\",\n \"涨速(%)\": \"涨速\",\n \"换手(%)\": \"换手\",\n \"振幅(%)\": \"振幅\",\n }, inplace=True, axis=1)\n del big_df['加自选']\n big_df['代码'] = big_df['代码'].astype(str).str.zfill(6)\n return big_df\n\n\ndef stock_board_concept_info_ths(symbol: str = \"阿里巴巴概念\") -> pd.DataFrame:\n \"\"\"\n 同花顺-板块-概念板块-板块简介\n http://q.10jqka.com.cn/gn/detail/code/301558/\n :param symbol: 板块简介\n :type symbol: str\n :return: 板块简介\n :rtype: pandas.DataFrame\n \"\"\"\n stock_board_ths_map_df = stock_board_concept_name_ths()\n symbol_code = stock_board_ths_map_df[stock_board_ths_map_df['name'] == symbol]['url'].values[0].split('/')[-2]\n url = 
f'http://q.10jqka.com.cn/gn/detail/code/{symbol_code}/'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',\n }\n r = requests.get(url, headers=headers)\n soup = BeautifulSoup(r.text, 'lxml')\n name_list = [item.text for item in soup.find('div', attrs={'class': 'board-infos'}).find_all('dt')]\n value_list = [item.text.strip().replace('\\n', '/') for item in soup.find('div', attrs={'class': 'board-infos'}).find_all('dd')]\n temp_df = pd.DataFrame([name_list, value_list]).T\n temp_df.columns = ['项目', \"值\"]\n return temp_df\n\n\nif __name__ == '__main__':\n stock_board_concept_name_ths_df = stock_board_concept_name_ths()\n print(stock_board_concept_name_ths_df)\n\n stock_board_concept_cons_ths_df = stock_board_concept_cons_ths(symbol=\"边缘计算\")\n print(stock_board_concept_cons_ths_df)\n\n stock_board_concept_info_ths_df = stock_board_concept_info_ths(symbol=\"阿里巴巴概念\")\n print(stock_board_concept_info_ths_df)\n",
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2020/10/7 22:43\nDesc: 收盘收益率曲线历史数据\nhttp://www.chinamoney.com.cn/chinese/bkcurvclosedyhis/?bondType=CYCC000&reference=1\n\"\"\"\nimport pandas as pd\nimport requests\n\n\ndef bond_china_close_return_map() -> pd.DataFrame:\n \"\"\"\n 收盘收益率曲线历史数据\n http://www.chinamoney.com.cn/chinese/bkcurvclosedyhis/?bondType=CYCC000&reference=1\n :return: 收盘收益率曲线历史数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.chinamoney.com.cn/ags/ms/cm-u-bk-currency/ClsYldCurvCurvGO\"\n r = requests.post(url)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"records\"])\n return temp_df\n\n\ndef bond_china_close_return(\n symbol: str = \"政策性金融债(进出口行)\",\n start_date: str = \"2020-08-30\",\n end_date: str = \"2020-09-30\",\n) -> pd.DataFrame:\n \"\"\"\n 收盘收益率曲线历史数据\n http://www.chinamoney.com.cn/chinese/bkcurvclosedyhis/?bondType=CYCC000&reference=1\n :param symbol: 需要获取的指标\n :type symbol: str\n :param start_date: 开始日期, 结束日期和开始日期不要超过 1 个月\n :type start_date: str\n :param end_date: 结束日期, 结束日期和开始日期不要超过 1 个月\n :type end_date: str\n :return: 收盘收益率曲线历史数据\n :rtype: pandas.DataFrame\n \"\"\"\n name_code_df = bond_china_close_return_map()\n symbol_code = name_code_df[name_code_df[\"cnLabel\"] == symbol][\"value\"].values[0]\n url = \"http://www.chinamoney.com.cn/ags/ms/cm-u-bk-currency/ClsYldCurvHis\"\n params = {\n \"lang\": \"CN\",\n \"reference\": \"1\",\n \"bondType\": symbol_code,\n \"startDate\": start_date,\n \"endDate\": end_date,\n \"termId\": \"0.5\",\n \"pageNum\": \"1\",\n \"pageSize\": \"5000\",\n }\n r = requests.post(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"records\"])\n del temp_df[\"newDateValue\"]\n temp_df.columns = [\n \"到期收益率\",\n \"远期收益率\",\n \"日期\",\n \"期限\",\n \"即期收益率\",\n ]\n temp_df = temp_df[\n [\n \"日期\",\n \"期限\",\n \"到期收益率\",\n \"即期收益率\",\n \"远期收益率\",\n ]\n ]\n return temp_df\n\n\nif __name__ == \"__main__\":\n bond_china_close_return_df = 
bond_china_close_return(\n symbol=\"政策性金融债(进出口行)\", start_date=\"2020-08-30\", end_date=\"2020-09-30\"\n )\n print(bond_china_close_return_df)\n"
] | [
[
"pandas.read_table",
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.read_csv",
"pandas.DataFrame"
],
[
"pandas.DataFrame",
"pandas.read_html"
],
[
"pandas.DataFrame"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
],
[
"pandas.DataFrame",
"pandas.read_html"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xinjianlv/pycorrector | [
"697fc09032d129b2777cf686bb05663f2fc3c04f"
] | [
"pycorrector/transformers/models/bert_generation/modeling_bert_generation.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model specific for generation. \"\"\"\n\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions\nfrom ...modeling_utils import PreTrainedModel\nfrom ..bert.modeling_bert import BertEncoder\nfrom .configuration_bert_generation import BertGenerationConfig\n\nfrom pycorrector.utils.logger import logger\n\n\n_CONFIG_FOR_DOC = \"BertGenerationConfig\"\n_TOKENIZER_FOR_DOC = \"BertGenerationTokenizer\"\n\n\ndef load_tf_weights_in_bert_generation(\n model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False\n):\n try:\n import numpy as np\n import tensorflow.compat.v1 as tf\n\n import tensorflow_hub as hub\n import tensorflow_text # noqa: F401\n\n tf.disable_eager_execution()\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_model = hub.Module(tf_hub_path)\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n init.run()\n all_variables = tf_model.variable_map\n keep_track_variables = all_variables.copy()\n for key in list(all_variables.keys()):\n if \"global\" in key:\n logger.info(f\"Skipping {key}...\")\n continue\n if not is_encoder:\n model_pointer = getattr(model, model_class)\n else:\n model_pointer = model\n is_embedding = False\n logger.info(f\"Trying to match {key}...\")\n # remove start_string = \"module/bert/\"\n sub_layers = key.split(\"/\")[2:]\n if is_encoder_named_decoder and sub_layers[0] == \"encoder\":\n logger.info(f\"Skipping encoder layer {key} for decoder\")\n continue\n if is_encoder and sub_layers[0] == \"decoder\":\n logger.info(f\"Skipping decoder layer {key} for encoder\")\n continue\n for i, sub_layer in enumerate(sub_layers):\n if sub_layer == \"embeddings\":\n is_embedding = True\n elif sub_layer == \"LayerNorm\":\n is_embedding = False\n if \"layer\" in sub_layer:\n model_pointer = model_pointer.layer[int(sub_layer.split(\"_\")[-1])]\n elif sub_layer in [\"kernel\", \"gamma\"]:\n model_pointer = model_pointer.weight\n elif sub_layer == \"beta\":\n model_pointer = model_pointer.bias\n elif sub_layer == \"encdec\":\n model_pointer = model_pointer.crossattention.self\n elif sub_layer == \"encdec_output\":\n model_pointer = model_pointer.crossattention.output\n elif is_encoder_named_decoder and sub_layer == \"decoder\":\n model_pointer = model_pointer.encoder\n else:\n if sub_layer == \"attention\" and \"encdec\" in sub_layers[i + 1]:\n continue\n try:\n model_pointer = getattr(model_pointer, sub_layer)\n except AttributeError:\n logger.info(f\"Skipping to initialize {key} at {sub_layer}...\")\n raise AttributeError\n\n array = np.asarray(sess.run(all_variables[key]))\n if not is_embedding:\n logger.info(\"Transposing numpy weight of 
shape {} for {}\".format(array.shape, key))\n array = np.transpose(array)\n else:\n model_pointer = model_pointer.weight\n\n try:\n assert (\n model_pointer.shape == array.shape\n ), f\"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (model_pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {key}\")\n\n model_pointer.data = torch.from_numpy(array.astype(np.float32))\n keep_track_variables.pop(key, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(keep_track_variables.keys())))\n return model\n\n\nclass BertGenerationEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word and position embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = 
self.position_embeddings(position_ids)\n\n embeddings = inputs_embeds + position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertGenerationPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n model_files.\n \"\"\"\n\n config_class = BertGenerationConfig\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nBERT_GENERATION_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertGenerationConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nBERT_GENERATION_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertGenerationTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.\",\n BERT_GENERATION_START_DOCSTRING,\n)\nclass BertGenerationEncoder(BertGenerationPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n This model should be used when leveraging Bert or Roberta checkpoints for the\n :class:`~transformers.EncoderDecoderModel` class as described in `Leveraging Pre-trained Checkpoints for Sequence\n Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. 
To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertGenerationEmbeddings(config)\n self.encoder = BertEncoder(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/bert_for_seq_generation_L-24_bbc_encoder\",\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for\n tokens that are NOT MASKED, ``0`` for MASKED tokens.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif 
inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask = None\n if not use_cache:\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n 
inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[1:]\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=sequence_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\nclass BertGenerationOnlyLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n logits = self.decoder(hidden_states)\n return logits\n\n\n@add_start_docstrings(\n \"\"\"BertGeneration Model with a `language modeling` head on top for CLM fine-tuning. 
\"\"\",\n BERT_GENERATION_START_DOCSTRING,\n)\nclass BertGenerationDecoder(BertGenerationPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warn(\"If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`\")\n\n self.bert = BertGenerationEncoder(config)\n self.lm_head = BertGenerationOnlyLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig\n >>> import torch\n\n >>> tokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')\n >>> config = BertGenerationConfig.from_pretrained(\"google/bert_for_seq_generation_L-24_bbc_encoder\")\n >>> config.is_decoder = True\n >>> model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_token_type_ids=False, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n 
head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.arange",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"torch.nn.Linear",
"numpy.transpose",
"tensorflow.compat.v1.disable_eager_execution"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ankur3107/zenml | [
"5dc05a833b50ac9cc49e851b9d91255da6016dfd"
] | [
"examples/functional_api/chapter_4.py"
] | [
"# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.base import ClassifierMixin\nfrom sklearn.linear_model import LogisticRegression\n\nfrom zenml.integrations.constants import SKLEARN, TENSORFLOW\nfrom zenml.pipelines import pipeline\nfrom zenml.repository import Repository\nfrom zenml.steps import BaseStepConfig, Output, step\n\n\nclass TrainerConfig(BaseStepConfig):\n \"\"\"Trainer params\"\"\"\n\n epochs: int = 1\n gamma: float = 0.7\n lr: float = 0.001\n\n\n@step\ndef importer_mnist() -> Output(\n X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray\n):\n \"\"\"Download the MNIST data store it as an artifact\"\"\"\n (X_train, y_train), (\n X_test,\n y_test,\n ) = tf.keras.datasets.mnist.load_data()\n return X_train, y_train, X_test, y_test\n\n\n@step\ndef normalize_mnist(\n X_train: np.ndarray, X_test: np.ndarray\n) -> Output(X_train_normed=np.ndarray, X_test_normed=np.ndarray):\n \"\"\"Normalize the values for all the images so they are between 0 and 1\"\"\"\n X_train_normed = X_train / 255.0\n X_test_normed = X_test / 255.0\n return X_train_normed, X_test_normed\n\n\n@step\ndef tf_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> tf.keras.Model:\n \"\"\"Train a neural net from scratch to recognize MNIST digits return our\n model or the learner\"\"\"\n model = tf.keras.Sequential(\n [\n 
tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(10, activation=\"relu\"),\n tf.keras.layers.Dense(10),\n ]\n )\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(0.001),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n\n model.fit(\n X_train,\n y_train,\n epochs=config.epochs,\n )\n\n # write model\n return model\n\n\n@step\ndef tf_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: tf.keras.Model,\n) -> float:\n \"\"\"Calculate the loss for the model for each epoch in a graph\"\"\"\n\n _, test_acc = model.evaluate(X_test, y_test, verbose=2)\n return test_acc\n\n\n@step\ndef sklearn_trainer(\n config: TrainerConfig,\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> ClassifierMixin:\n \"\"\"Train SVC from sklearn.\"\"\"\n clf = LogisticRegression(penalty=\"l1\", solver=\"saga\", tol=0.1)\n clf.fit(X_train.reshape((X_train.shape[0], -1)), y_train)\n return clf\n\n\n@step\ndef sklearn_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: ClassifierMixin,\n) -> float:\n \"\"\"Calculate accuracy score with classifier.\"\"\"\n\n test_acc = model.score(X_test.reshape((X_test.shape[0], -1)), y_test)\n return test_acc\n\n\n@pipeline(required_integrations=[SKLEARN, TENSORFLOW])\ndef mnist_pipeline(\n importer,\n normalizer,\n trainer,\n evaluator,\n):\n # Link all the steps artifacts together\n X_train, y_train, X_test, y_test = importer()\n X_trained_normed, X_test_normed = normalizer(X_train=X_train, X_test=X_test)\n model = trainer(X_train=X_trained_normed, y_train=y_train)\n evaluator(X_test=X_test_normed, y_test=y_test, model=model)\n\n\n# Run the pipeline\n# Initialize a pipeline run\ntf_p = mnist_pipeline(\n importer=importer_mnist(),\n normalizer=normalize_mnist(),\n trainer=tf_trainer(config=TrainerConfig(epochs=1)),\n evaluator=tf_evaluator(),\n)\n\n# Run the pipeline\ntf_p.run()\n\n\n# Initialize a new pipeline run\nscikit_p = mnist_pipeline(\n 
importer=importer_mnist(),\n normalizer=normalize_mnist(),\n trainer=sklearn_trainer(config=TrainerConfig()),\n evaluator=sklearn_evaluator(),\n)\n\n# Run the new pipeline\nscikit_p.run()\n\n# Post execution flow\nrepo = Repository()\np = repo.get_pipeline(pipeline_name=\"mnist_pipeline\")\nprint(f\"Pipeline `mnist_pipeline` has {len(p.runs)} run(s)\")\nfor r in p.runs[0:2]:\n eval_step = r.get_step(\"evaluator\")\n print(\n f\"For {eval_step.entrypoint_name}, the accuracy is: \"\n f\"{eval_step.output.read():.2f}\"\n )\n"
] | [
[
"sklearn.linear_model.LogisticRegression",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
ersilia-os/osm-series4-candidates-2 | [
"a0b7f55d79c65182dcc4c102791d2ababbfb176e"
] | [
"scripts/4_similarity.py"
] | [
"from __init__ import OUTPUT\n\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.DataStructs import BulkTanimotoSimilarity\nimport os, sys\n\nprint(\"SIMILARITY SCORES\")\n\ndef mols_to_fingerprints(molecules, radius=3, useCounts=False, useFeatures=True):\n fingerprints = [AllChem.GetMorganFingerprint(\n mol,\n radius,\n useCounts=useCounts,\n useFeatures=useFeatures\n ) for mol in tqdm(molecules)]\n return fingerprints\n\nraw_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"data\", \"raw\")\nsys.path.append(raw_folder)\n\n#get series4 molecules for tanimoto similarity\ns4 = pd.read_csv(os.path.join(raw_folder, \"series4_processed.csv\"))\ns4_smiles = s4[\"smiles\"].tolist()\ns4_mols = [Chem.MolFromSmiles(smi) for smi in s4_smiles]\nref_fps=mols_to_fingerprints(s4_mols)\n\n\ndf = pd.read_csv(os.path.join(OUTPUT, \"data_3.csv\"))\nsmiles=df[\"Smiles\"].tolist()\nmols = [Chem.MolFromSmiles(smi) for smi in tqdm(smiles)]\nfps=mols_to_fingerprints(mols)\nsims = []\nfor fp in tqdm(fps):\n sim=BulkTanimotoSimilarity(fp, ref_fps)\n maxsim = np.max(sim)\n sims += [maxsim]\n\ndf[\"Similarity\"]=sims\ndf=df[df[\"Similarity\"] <= 0.70]\n\ndf.to_csv(os.path.join(OUTPUT, \"data_4.csv\"), index = False)\n"
] | [
[
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OpenSourceEconomics/grmpy | [
"3ff5ec9cd108582c23cb61e6b8d87f4db6ceaee1"
] | [
"grmpy/read/read_auxiliary.py"
] | [
"\"\"\"This module provides auxiliary functions for the import process of the init file.\"\"\"\nimport numpy as np\n\n\ndef create_attr_dict_est(init_dict, semipar=False, include_constant=False):\n \"\"\"This function processes the imported initialization file so that it fulfills the\n requirements for the subsequent estimation process.\n \"\"\"\n init_dict[\"AUX\"] = {\"init_values\"}\n init_values = []\n\n if semipar is True:\n if include_constant is True:\n init_dict = add_constant(init_dict, semipar)\n else:\n pass\n\n init_dict = read_keys_semipar(init_dict, init_values)\n\n # semipar is False\n else:\n if include_constant is True:\n init_dict = add_constant(init_dict, semipar)\n else:\n pass\n\n init_dict = read_keys_par(init_dict, init_values)\n\n init_dict = provide_auxiliary_information(init_dict, init_values)\n\n return init_dict\n\n\ndef create_attr_dict_sim(init_dict):\n \"\"\"This function processes the imported initialization file so that it fulfills the\n requirements for the following simulation and estimation process.\n \"\"\"\n init_dict[\"AUX\"] = {\"init_values\"}\n init_values = []\n\n init_dict = read_keys_par(init_dict, init_values)\n init_dict = provide_auxiliary_information(init_dict, init_values)\n\n return init_dict\n\n\ndef add_constant(init_dict, semipar=False):\n \"\"\"The function checks if the user has provided a constant\n for the relevant subsections:\n [\"TREATED\", \"UNTREATED\", \"CHOICE\"] for the parametric, and\n [\"CHOICE\"] for the semiparamteric estimation, respectively.\n \"\"\"\n\n if semipar is True:\n if \"const\" not in init_dict[\"CHOICE\"][\"order\"]:\n init_dict[\"CHOICE\"][\"order\"].insert(0, \"const\")\n init_dict[\"CHOICE\"][\"params\"] = np.array([1.0])\n else:\n pass\n\n # semipar is False\n else:\n for key in [\"TREATED\", \"UNTREATED\", \"CHOICE\"]:\n if \"const\" not in init_dict[key][\"order\"]:\n init_dict[key][\"order\"].insert(0, \"const\")\n init_dict[key][\"params\"] = np.array([1.0])\n else:\n 
pass\n\n return init_dict\n\n\ndef read_keys_par(init_dict, init_values):\n \"\"\"This function reads the information provided by the\n [\"TREATED\", \"UNTREATED\", \"CHOICE\", \"DIST\"] keys for\n the simulation and parametric estimation.\n \"\"\"\n for key in [\"TREATED\", \"UNTREATED\", \"CHOICE\", \"DIST\"]:\n if \"params\" in init_dict[key].keys():\n init_dict[key][\"params\"] = np.array(init_dict[key][\"params\"])\n init_values += list(init_dict[key][\"params\"])\n else:\n init_values += [0.0] * len(init_dict[key][\"order\"])\n\n if np.all(init_dict[\"DIST\"][\"params\"] == 0):\n init_dict[\"DETERMINISTIC\"] = True\n else:\n init_dict[\"DETERMINISTIC\"] = False\n\n return init_dict\n\n\ndef read_keys_semipar(init_dict, init_values):\n \"\"\"This function reads the information provided by the\n [\"TREATED\", \"UNTREATED\", \"CHOICE\"] keys for\n semiparametric estimation.\n \"\"\"\n for key in [\"TREATED\", \"UNTREATED\", \"CHOICE\"]:\n if \"params\" in init_dict[key].keys():\n init_dict[key][\"params\"] = np.array(init_dict[key][\"params\"])\n init_values += list(init_dict[key][\"params\"])\n else:\n init_values += [0.0] * len(init_dict[key][\"order\"])\n\n return init_dict\n\n\ndef provide_auxiliary_information(init_dict, init_values):\n \"\"\"This function generates auxiliary information\n given the parameters in the initialization dictionary\n \"\"\"\n num_covars = len(\n set(\n init_dict[\"TREATED\"][\"order\"]\n + init_dict[\"UNTREATED\"][\"order\"]\n + init_dict[\"CHOICE\"][\"order\"]\n )\n )\n\n covar_label = []\n for section in [\"TREATED\", \"UNTREATED\", \"CHOICE\"]:\n covar_label += [i for i in init_dict[section][\"order\"] if i not in covar_label]\n\n # Generate the AUX section that include some additional auxiliary information\n init_dict[\"AUX\"] = {\n \"init_values\": np.array(init_values),\n \"num_covars_choice\": len(init_dict[\"CHOICE\"][\"order\"]),\n \"num_covars_treated\": len(init_dict[\"TREATED\"][\"order\"]),\n 
\"num_covars_untreated\": len(init_dict[\"UNTREATED\"][\"order\"]),\n \"num_paras\": len(init_values) + 1,\n \"num_covars\": num_covars,\n \"labels\": covar_label,\n }\n\n return init_dict\n"
] | [
[
"numpy.all",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mwizasimbeye11/hub | [
"d743b0f14ee538e8bb50006895779b048d0f4db1"
] | [
"tensorflow_hub/feature_column_test.py"
] | [
"# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_hub.feature_column.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\ntry:\n import mock as mock\nexcept ImportError:\n import unittest.mock as mock\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow_hub import tf_v1\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.ops.lookup_ops import HashTable\nfrom tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer\n# pylint: enable=g-direct-tensorflow-import\n\n\ndef text_module_fn():\n embeddings = [\n (\"\", [0, 0, 0, 0]), # OOV items are mapped to this embedding.\n (\"hello world\", [1, 2, 3, 4]),\n (\"pair-programming\", [5, 5, 5, 5]),\n ]\n keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)\n indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)\n tbl_init = KeyValueTensorInitializer(keys, indices)\n table = HashTable(tbl_init, 0)\n\n weights_initializer = tf.cast(\n tf.constant(list([item[1] for item in 
embeddings])), tf.float32)\n\n weights = tf_v1.get_variable(\n \"weights\", dtype=tf.float32, initializer=weights_initializer)\n\n text_tensor = tf_v1.placeholder(dtype=tf.string, name=\"text\", shape=[None])\n indices_tensor = table.lookup(text_tensor)\n embedding_tensor = tf.gather(weights, indices_tensor)\n hub.add_signature(inputs=text_tensor, outputs=embedding_tensor)\n\n\ndef invalid_text_module_fn():\n text = tf_v1.placeholder(tf.string, shape=[10])\n hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))\n\n\ndef export_module_spec(spec, export_path):\n \"\"\"Export module with random initialization.\"\"\"\n with tf_v1.Graph().as_default():\n m = hub.Module(spec)\n with tf_v1.Session() as session:\n session.run(tf_v1.initializers.global_variables())\n m.export(export_path, session)\n\n\nclass CommonColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n @mock.patch.object(feature_column_v2._StateManagerImpl, \"add_resource\")\n def testFeatureColumnsWithResources(self, mock_add_resource):\n feature_column = hub.text_embedding_column(\"text_a\", self.spec)\n if not isinstance(feature_column, feature_column_v2.FeatureColumn):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))\n\n @mock.patch.object(feature_column_v2._StateManagerImpl, \"add_resource\")\n def testFeatureColumnsWithNoResources(self, mock_add_resource):\n mock_add_resource.side_effect = NotImplementedError\n feature_column = hub.text_embedding_column(\"text_a\", self.spec)\n self.assertFalse(feature_column_v2.is_feature_column_v2([feature_column]))\n\n\nclass TextEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n def testVariableShape(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n 
self.assertEqual(text_column._variable_shape, [4])\n\n def testParents(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n self.assertEqual([\"text\"], text_column.parents)\n\n def testMakeParseExampleSpec(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])\n self.assertEqual(parsing_spec,\n {\"text\": tf_v1.FixedLenFeature([1], dtype=tf.string)})\n\n def testInputLayer(self):\n features = {\n \"text_a\": [\"hello world\", \"pair-programming\"],\n \"text_b\": [\"hello world\", \"oov token\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text_a\", self.spec, trainable=False),\n hub.text_embedding_column(\"text_b\", self.spec, trainable=False),\n ]\n with tf.Graph().as_default():\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllEqual(\n output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])\n\n def testDenseFeatures(self):\n features = {\n \"text_a\": [\"hello world\", \"pair-programming\"],\n \"text_b\": [\"hello world\", \"oov token\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text_a\", self.spec, trainable=False),\n hub.text_embedding_column(\"text_b\", self.spec, trainable=False),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. 
This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out = feature_layer(features)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(feature_layer_out)\n self.assertAllEqual(\n output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])\n\n def testDenseFeatures_shareAcrossApplication(self):\n features = {\n \"text\": [\"hello world\", \"pair-programming\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text\", self.spec, trainable=True),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out_1 = feature_layer(features)\n feature_layer_out_2 = feature_layer(features)\n\n # We define loss only on the first layer. 
Since layers should have shared\n # weights, we expect the second layer will change too.\n loss = feature_layer_out_1 - tf.constant(0.005)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.7)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n before_update_1 = sess.run(feature_layer_out_1)\n sess.run(train_op)\n after_update_1 = sess.run(feature_layer_out_1)\n after_update_2 = sess.run(feature_layer_out_2)\n\n self.assertAllEqual(before_update_1, [[1, 2, 3, 4],\n [5, 5, 5, 5]])\n self.assertAllEqual(after_update_1, after_update_2)\n\n def testWorksWithCannedEstimator(self):\n comment_embedding_column = hub.text_embedding_column(\n \"comment\", self.spec, trainable=False)\n upvotes = tf_v1.feature_column.numeric_column(\"upvotes\")\n\n feature_columns = [comment_embedding_column, upvotes]\n estimator = tf_v1.estimator.DNNClassifier(\n hidden_units=[10],\n feature_columns=feature_columns,\n model_dir=self.get_temp_dir())\n\n # This only tests that estimator apis are working with the feature\n # column without throwing exceptions.\n features = {\n \"comment\": np.array([\n [\"the quick brown fox\"],\n [\"spam spam spam\"],\n ]),\n \"upvotes\": np.array([\n [20],\n [1],\n ]),\n }\n labels = np.array([[1], [0]])\n numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn\n input_fn = numpy_input_fn(features, labels, shuffle=True)\n estimator.train(input_fn, max_steps=1)\n estimator.evaluate(input_fn, steps=1)\n estimator.predict(input_fn)\n\n def testTrainableEmbeddingColumn(self):\n feature_columns = [\n hub.text_embedding_column(\"text\", self.spec, trainable=True),\n ]\n\n with tf.Graph().as_default():\n features = {\n \"text\": [\"hello world\", \"pair-programming\"],\n }\n target = [[1, 1, 1, 1], [4, 3, 2, 1]]\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n\n loss = tf.cast(\n tf_v1.losses.mean_squared_error(input_layer, target), tf.float64)\n optimizer = 
tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])\n for _ in range(10):\n sess.run(train_op)\n self.assertAllClose(sess.run(input_layer), target, atol=0.5)\n\n def testInvalidTextModule(self):\n spec = hub.create_module_spec(invalid_text_module_fn)\n with self.assertRaisesRegexp(ValueError, \"only one input\"):\n hub.text_embedding_column(\"coment\", spec, trainable=False)\n\n def testConfig(self):\n module_path = os.path.join(self.get_temp_dir(), \"module\")\n export_module_spec(self.spec, module_path)\n text_column = hub.text_embedding_column(\"text\", module_path)\n config = text_column.get_config()\n cloned_text_column = hub.feature_column._TextEmbeddingColumn.from_config(\n config)\n self.assertEqual(cloned_text_column.module_spec_path,\n text_column.module_spec_path)\n\n with self.assertRaisesRegexp(NotImplementedError, \"Can only generate\"):\n text_column = hub.text_embedding_column(\"text\", self.spec)\n config = text_column.get_config()\n\n\ndef create_image_module_fn(randomly_initialized=False):\n def image_module_fn():\n \"\"\"Maps 1x2 images to sums of each color channel.\"\"\"\n images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3])\n if randomly_initialized:\n initializer = tf_v1.random_uniform_initializer(\n minval=-1, maxval=1, dtype=tf.float32)\n else:\n initializer = tf_v1.constant_initializer(1.0, dtype=tf.float32)\n weight = tf_v1.get_variable(\n name=\"weight\", shape=[1], initializer=initializer)\n sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight\n hub.add_signature(inputs={\"images\": images}, outputs=sum_channels)\n return image_module_fn\n\n\nclass ImageEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(create_image_module_fn())\n self.randomly_initialized_spec = hub.create_module_spec(\n 
create_image_module_fn(randomly_initialized=True))\n\n def testExpectedImageSize(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n # The usage comment recommends this code pattern, so we test it here.\n self.assertSequenceEqual(\n hub.get_expected_image_size(image_column.module_spec), [1, 2])\n\n def testVariableShape(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n self.assertEqual(image_column.variable_shape, [3])\n\n def testParents(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n self.assertEqual([\"image\"], image_column.parents)\n\n def testMakeParseExampleSpec(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([image_column])\n self.assertEqual(\n parsing_spec,\n {\"image\": tf_v1.FixedLenFeature([1, 2, 3], dtype=tf.float32)})\n\n def testInputLayer(self):\n features = {\n \"image_a\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n \"image_b\": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],\n [[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image_a\", self.spec),\n hub.image_embedding_column(\"image_b\", self.spec),\n ]\n with tf.Graph().as_default():\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllClose(\n output,\n [[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])\n\n def testDenseFeatures(self):\n features = {\n \"image_a\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n \"image_b\": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],\n [[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image_a\", self.spec),\n hub.image_embedding_column(\"image_b\", self.spec),\n ]\n if not 
feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out = feature_layer(features)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(feature_layer_out)\n self.assertAllClose(\n output,\n [[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])\n\n def testDenseFeatures_shareAcrossApplication(self):\n features = {\n \"image\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image\", self.randomly_initialized_spec),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n # We want to test with dense_features_v2.DenseFeatures. 
This symbol was\n # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a.\n feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)\n feature_layer_out_1 = feature_layer(features)\n feature_layer_out_2 = feature_layer(features)\n\n with tf_v1.train.MonitoredSession() as sess:\n output_1 = sess.run(feature_layer_out_1)\n output_2 = sess.run(feature_layer_out_2)\n\n self.assertAllClose(output_1, output_2)\n\n def testWorksWithCannedEstimator(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n other_column = tf_v1.feature_column.numeric_column(\"number\")\n\n feature_columns = [image_column, other_column]\n estimator = tf_v1.estimator.DNNClassifier(\n hidden_units=[10],\n feature_columns=feature_columns,\n model_dir=self.get_temp_dir())\n\n # This only tests that estimator apis are working with the feature\n # column without throwing exceptions.\n features = {\n \"image\":\n np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n dtype=np.float32),\n \"number\":\n np.array([[20], [1]]),\n }\n labels = np.array([[1], [0]])\n numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn\n input_fn = numpy_input_fn(features, labels, shuffle=True)\n estimator.train(input_fn, max_steps=1)\n estimator.evaluate(input_fn, steps=1)\n estimator.predict(input_fn)\n\n def testConfig(self):\n module_path = os.path.join(self.get_temp_dir(), \"module\")\n export_module_spec(self.spec, module_path)\n image_column = hub.image_embedding_column(\"image\", module_path)\n config = image_column.get_config()\n cloned_image_column = hub.feature_column._ImageEmbeddingColumn.from_config(\n config)\n self.assertEqual(cloned_image_column.module_spec_path,\n image_column.module_spec_path)\n\n with self.assertRaisesRegexp(NotImplementedError, \"Can only generate\"):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n config = image_column.get_config()\n\n def 
testName(self):\n image_column = hub.image_embedding_column(\n tf.feature_column.numeric_column(\"image\"), self.spec)\n self.assertEqual(\"image_hub_module_embedding\", image_column.name)\n\n\nclass SparseTextEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n def testVariableShape(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, combiner=\"mean\", default_value=None, trainable=False)\n self.assertEqual(text_column._variable_shape, [4])\n\n def testMakeParseExampleSpec(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, combiner=\"mean\", default_value=None, trainable=False)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])\n self.assertEqual(parsing_spec, {\"text\": tf_v1.VarLenFeature(tf.string)})\n\n def testParents(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, \"sum\", \"\", trainable=False)\n self.assertEqual([\"text\"], text_column.parents)\n\n def testInputLayer(self):\n with tf.Graph().as_default():\n text_a = tf.SparseTensor(\n values=[\"hello world\", \"pair-programming\", \"hello world\"],\n indices=[[0, 0], [0, 1], [1, 0]],\n dense_shape=[2, 2])\n text_b = tf.SparseTensor(\n values=[\"hello world\", \"oov token\"],\n indices=[[0, 0], [0, 1]],\n dense_shape=[2, 3])\n\n features = {\n \"text_a\": text_a,\n \"text_b\": text_b,\n }\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text_a\",\n self.spec,\n combiner=\"mean\",\n default_value=\"__UNKNOWN__\",\n trainable=False),\n hub.sparse_text_embedding_column(\n \"text_b\",\n self.spec,\n combiner=\"mean\",\n default_value=\"__UNKNOWN__\",\n trainable=False),\n ]\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllEqual(\n output,\n [[3, 3.5, 4, 4.5, 0.5, 1, 1.5, 2], [1, 2, 3, 4, 0, 0, 0, 
0]])\n # ([1, 2, 3, 4] + [5, 5, 5, 5])/2 extend ([1, 2, 3, 4] + [0, 0, 0, 0])/2\n # [1, 2, 3, 4] extend [0, 0, 0, 0]\n\n def testTrainableEmbeddingColumn(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=None,\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=[\"hello world\", \"pair-programming\"],\n indices=[[0, 0], [1, 0]],\n dense_shape=[2, 2])\n\n target = [[1, 1, 1, 1], [4, 3, 2, 1]]\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n loss = tf_v1.losses.mean_squared_error(input_layer, target)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])\n for _ in range(10):\n sess.run(train_op)\n self.assertAllClose(sess.run(input_layer), target, atol=0.5)\n\n def testEmptySparseTensorBatch(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=\"default\",\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=tf_v1.constant([], dtype=tf_v1.string, shape=[0]),\n indices=tf_v1.constant([], dtype=tf_v1.int64, shape=[0, 2]),\n dense_shape=[3, 0])\n\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n with tf_v1.train.MonitoredSession() as sess:\n embeddings = sess.run(input_layer)\n self.assertAllEqual(embeddings,\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n\n def testEmptySparseTensorRow(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=\"default\",\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=tf_v1.constant([\"hello world\"], dtype=tf_v1.string, shape=[1]),\n 
indices=tf_v1.constant([[0, 0]], dtype=tf_v1.int64, shape=[1, 2]),\n dense_shape=[2, 1])\n\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n with tf_v1.train.MonitoredSession() as sess:\n embeddings = sess.run(input_layer)\n self.assertAllEqual(embeddings, [[1, 2, 3, 4], [0, 0, 0, 0]])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.zeros",
"tensorflow.compat.v2.keras.layers.DenseFeatures",
"tensorflow.reduce_sum",
"tensorflow.python.ops.lookup_ops.HashTable",
"tensorflow.test.main",
"tensorflow.SparseTensor",
"tensorflow.gather",
"tensorflow.feature_column.numeric_column",
"tensorflow.python.ops.lookup_ops.KeyValueTensorInitializer",
"numpy.array",
"tensorflow.python.feature_column.feature_column_v2.is_feature_column_v2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Laurans/procgen_adventure | [
"5f88f3f647f7854c8fb2ae516f3490d89845eefa"
] | [
"procgen_adventure/utils/torch_utils.py"
] | [
"import numpy as np\nimport torch\nimport torch.distributed as dist\n\n\ndef tensor(x, device):\n if isinstance(x, torch.Tensor):\n return x.to(device)\n\n x = np.asarray(x, dtype=np.float)\n x = torch.tensor(x, device=device, dtype=torch.float32)\n return x\n\n\ndef input_preprocessing(x, device):\n x = tensor(x, device)\n x = x.float()\n x /= 255.0\n return x\n\n\ndef to_np(t):\n return t.cpu().detach().numpy()\n\n\ndef random_seed(seed=None):\n np.random.seed(seed)\n torch.manual_seed(np.random.randint(int(1e6)))\n\n\ndef restore_model(model, save_path):\n checkpoint = torch.load(save_path)\n model.network.load_state_dict(checkpoint[\"model_state_dict\"])\n model.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n update = checkpoint[\"update\"]\n return update\n\n\ndef sync_initial_weights(model):\n for param in model.parameters():\n dist.broadcast(param.data, src=0)\n\n\ndef sync_gradients(model):\n for param in model.parameters():\n dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\ndef sync_values(tensor_sum_values, tensor_nb_values):\n dist.reduce(tensor_sum_values, dst=0)\n dist.reduce(tensor_nb_values, dst=0)\n return tensor_sum_values / tensor_nb_values\n\n\ndef range_tensor(t, device):\n return torch.arange(t).long().to(device)\n\n\ndef zeros(shape, dtype):\n \"\"\"Attempt to return torch tensor of zeros, or if numpy dtype provided,\n return numpy array or zeros.\"\"\"\n try:\n return torch.zeros(shape, dtype=dtype)\n except TypeError:\n return np.zeros(shape, dtype=dtype)\n"
] | [
[
"torch.distributed.broadcast",
"numpy.random.seed",
"torch.load",
"numpy.asarray",
"torch.zeros",
"torch.arange",
"torch.tensor",
"torch.distributed.reduce",
"torch.distributed.destroy_process_group",
"torch.distributed.all_reduce",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vgliner/Chlng_20_Sub | [
"169d098e5315510df83ad988c7e2067317cef4cf"
] | [
"ECG_Dataloader_Brazilian_records.py"
] | [
"from torch.utils.data import Dataset\nimport os\nimport scipy.io as sio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport pandas as pd\nimport random\nfrom scipy.io import loadmat\nimport Utils\nfrom scipy import interpolate\nfrom scipy import signal\nimport csv\nfrom scipy.signal import butter, lfilter, freqz\nimport re\nfrom glob import glob\nimport time\nimport pickle\n\n\n\"\"\"\nIt contains annotations about 6 different ECGs abnormalities:\n- 1st degree AV block (1dAVb);\n- right bundle branch block (RBBB);\n- left bundle branch block (LBBB);\n- sinus bradycardia (SB);\n- atrial fibrillation (AF); \n- sinus tachycardia (ST).\n\nNotation of multiclass_to_binary_type: \n[-1] Return multiclass [0] I-AVB, [1] RBBB, [2] LBBB, [3] SB, [4] AF, [5] ST\n\"\"\"\n\nPRINT_FLAG = False\n\n\nclass ECG_Multilead_Dataset_Brazilian_records(Dataset):\n def __init__(self, root_dir=None, transform=None, multiclass=False,\n binary_class_type=1, apply_aurmentation=True, random_augmentation=True,\n augmentation_method=None, record_length=60, to_normalize=True, Uploading_method='HDD',\n old_format= False):\n # record_length [sec]\n # Uploading_method = 'HDD'\\'RAM'\\'cache'\n super().__init__()\n self.data = []\n self.samples = None\n self.root_dir = root_dir\n self.transform = transform\n self.multiclass = multiclass\n self.binary_class_type = binary_class_type\n self.apply_aurmentation = apply_aurmentation\n self.random_augmentation = random_augmentation\n self.augmentation_method = augmentation_method\n self.database_length = 0\n self.data_mutual_sample_rate = 500\n self.record_length = record_length * self.data_mutual_sample_rate\n self.to_normalize = to_normalize\n self.Uploading_method = Uploading_method\n self.brazilian_database_path = None\n self.brazilian_annotations_path = None\n self.sample_rate = 400\n self.maximal_length = self.sample_rate * self.record_length\n\n if not multiclass:\n assert binary_class_type >= 0, 'Class selection is mandatory 
for single class classification'\n\n if self.root_dir is None:\n paths = Utils.read_config_file()\n self.brazilian_database_path = paths[1]\n self.brazilian_annotations_path = paths[2]\n self.brazilian_annotations_dict_path = paths[3]\n\n else:\n self.brazilian_database_path = self.root_dir + dataset_filename\n\n self.f = h5py.File(self.brazilian_database_path, \"r\")\n self.data_ids = np.array(self.f['id_exam'])\n self.data = self.f['signal']\n start = time.process_time()\n self.annotations = pd.read_csv(self.brazilian_annotations_path)\n end = time.process_time()\n print(f'Uploading annotations took {end-start} sec.')\n start = time.process_time()\n\n # Convert Data Frame to Dictionary (set_index method allows any column to be used as index)\n with open(self.brazilian_annotations_dict_path, 'rb') as handle:\n self.annotations_dict = pickle.load(handle)\n #self.annotations_dict = self.annotations.set_index('id_exam').transpose().to_dict(orient='dict')\n end = time.process_time()\n print(f'Uploading annotations dictionary took {end-start} sec.')\n print('finished')\n\n self.loaded_data = {}\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n\n if idx not in self.loaded_data.keys():\n sample = self.data[idx]\n data_id = self.data_ids[idx]\n sample = np.transpose(sample)\n annotation = self.annotations_dict[data_id]\n annotation = list(annotation.values())[3:]\n sample = (sample, annotation)\n else:\n sample = self.loaded_data[idx]\n\n if self.to_normalize:\n sample = self.normalization(sample)\n\n if self.binary_class_type >= 0 and not self.multiclass:\n sample[1] = sample[1][int(self.binary_class_type)]\n\n if self.multiclass:\n sample[1] = np.stack(sample[1])\n\n if self.Uploading_method == 'cache' and idx not in self.loaded_data.keys():\n self.loaded_data[idx] = sample\n\n if self.apply_aurmentation:\n sample = self.augmentation_algorithm(sample)\n\n return sample\n\n def find_annotations(self, id_to_find):\n a= 
list(self.annotations['id_exam']).index(id_to_find)\n return list(self.annotations.iloc[a].values[4:])\n\n @staticmethod\n def plot(sample):\n item_to_plot = sample[0]\n fig, axes = plt.subplots(nrows=6, ncols=2)\n fig.suptitle(np.array2string(sample[1]), fontsize=14)\n titles = ['Lead1', 'Lead2', 'Lead3', 'aVR', 'aVL', 'aVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']\n b = item_to_plot\n for ax, cntr in zip(axes.flatten(), range(12)):\n ax.plot(b[cntr, :], linewidth=1.0)\n ax.set(title=titles[cntr])\n plt.plot()\n plt.show()\n return\n\n @staticmethod\n def plot_one_strip(one_strip):\n item_to_plot = one_strip\n plt.plot(item_to_plot)\n plt.show()\n return\n\n\n def augmentation_algorithm(self, record):\n current_record_length = record[0].shape[1]\n if current_record_length == self.record_length:\n return record\n if current_record_length <= self.record_length: # record is shorter than maximal length or similar\n new_sample = np.zeros((12, self.record_length))\n index_for_pasting = random.sample(range(self.record_length - current_record_length), 1)\n new_sample[:, index_for_pasting[0]:index_for_pasting[0] + current_record_length] = record[0]\n else: # record is longer than maximal length\n index_for_pasting = random.sample(range(current_record_length - self.record_length), 1)\n new_sample = record[0][:, index_for_pasting[0]:index_for_pasting[0] + self.record_length]\n return [new_sample, record[1]]\n\n @staticmethod\n def normalization(record):\n sample = record[0]\n for i, strip in enumerate(sample):\n max_ = np.max(strip)\n min_ = np.min(strip)\n if max_ - min_ == 0:\n sample[i] = strip\n else:\n sample[i] = (strip - min_) / (max_ - min_)\n return [sample, record[1]] \n\n\ndef test_Brazilian_db_dataloader():\n print('Testing Brazilian database')\n ds = ECG_Multilead_Dataset_Brazilian_records()\n start = time.process_time()\n for record_counter in range(len(ds)):\n ds_record = ds[record_counter]\n # ds.plot(ds_record)\n if record_counter %10000 ==0:\n stop = 
time.process_time()\n print(f'Loaded record # {record_counter}, time : {stop-start}')\n print('Finished testing')\n\n\nif __name__ == \"__main__\":\n test_Brazilian_db_dataloader()\n"
] | [
[
"pandas.read_csv",
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.stack",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.transpose",
"numpy.array2string",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
f0nzie/rTorch | [
"40292ecd8a9ac1af6a03247cbb5f7a3227d60e2f"
] | [
"inst/python/torchtools/data_util.py"
] | [
"import gzip\nimport os\nfrom os import path\nimport numpy as np\n\nimport sys\nif sys.version_info.major < 3:\n import urllib\nelse:\n import urllib.request as request\n\n\nDATASET_DIR = 'datasets/'\n\nMNIST_FILES = [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]\n\n\ndef download_file(url, local_path):\n dir_path = path.dirname(local_path)\n if not path.exists(dir_path):\n print(\"Creating the directory '%s' ...\" % dir_path)\n os.makedirs(dir_path)\n\n print(\"Downloading from '%s' ...\" % url)\n if sys.version_info.major < 3:\n urllib.URLopener().retrieve(url, local_path)\n else:\n request.urlretrieve(url, local_path)\n\n\ndef download_mnist(local_path):\n url_root = \"http://yann.lecun.com/exdb/mnist/\"\n for f_name in MNIST_FILES:\n f_path = os.path.join(local_path, f_name)\n if not path.exists(f_path):\n download_file(url_root + f_name, f_path)\n\n\ndef one_hot(x, n):\n if type(x) == list:\n x = np.array(x)\n x = x.flatten()\n o_h = np.zeros((len(x), n))\n o_h[np.arange(len(x)), x] = 1\n return o_h\n\n\ndef load_mnist(ntrain=60000, ntest=10000, onehot=True):\n data_dir = os.path.join(DATASET_DIR, 'mnist_digits/')\n if not path.exists(data_dir):\n download_mnist(data_dir)\n else:\n # check all files\n checks = [path.exists(os.path.join(data_dir, f)) for f in MNIST_FILES]\n if not np.all(checks):\n download_mnist(data_dir)\n\n with gzip.open(os.path.join(data_dir, 'train-images-idx3-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)\n\n with gzip.open(os.path.join(data_dir, 'train-labels-idx1-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n trY = loaded[8:].reshape((60000))\n\n with gzip.open(os.path.join(data_dir, 't10k-images-idx3-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n teX = loaded[16:].reshape((10000, 28 * 
28)).astype(float)\n\n with gzip.open(os.path.join(data_dir, 't10k-labels-idx1-ubyte.gz')) as fd:\n buf = fd.read()\n loaded = np.frombuffer(buf, dtype=np.uint8)\n teY = loaded[8:].reshape((10000))\n\n trX /= 255.\n teX /= 255.\n\n trX = trX[:ntrain]\n trY = trY[:ntrain]\n\n teX = teX[:ntest]\n teY = teY[:ntest]\n\n if onehot:\n trY = one_hot(trY, 10)\n teY = one_hot(teY, 10)\n else:\n trY = np.asarray(trY)\n teY = np.asarray(teY)\n\n return trX, teX, trY, teY\n"
] | [
[
"numpy.asarray",
"numpy.frombuffer",
"numpy.array",
"numpy.all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sunher/game | [
"84b01b2c69b5cdecbc301fb0e56380ff06bfe353"
] | [
"snakeai/gameplayAttackAndHideRandom/environmentattackandhiderandom.py"
] | [
"import pprint\nimport random\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom .entities import Snake, Field, CellType, SnakeAction, ALL_SNAKE_ACTIONS, SnakeDirection, Point\n\n\nclass EnvironmentAttackAndHideRandom(object):\n \"\"\"\n Represents the RL environment for the Snake game that implements the game logic,\n provides rewards for the agent and keeps track of game statistics.\n \"\"\"\n\n def __init__(self, config, verbose=1):\n \"\"\"\n Create a new Snake RL environment.\n\n Args:\n config (dict): level configuration, typically found in JSON configs.\n verbose (int): verbosity level:\n 0 = do not write any debug information;\n 1 = write a CSV file containing the statistics for every episode;\n 2 = same as 1, but also write a full log file containing the state of each timestep.\n \"\"\"\n self.field = Field(level_map=config['field'])\n self.snake = None\n self.fruit = []\n self.poison = []\n self.poison_num = 0\n self.initial_snake_length = config['initial_snake_length']\n self.rewards = config['rewards']\n self.max_step_limit = config.get('max_step_limit', 1000)\n self.is_game_over = False\n\n self.timestep_index = 0\n self.current_action = None\n self.stats = EpisodeStatistics()\n self.verbose = verbose\n self.debug_file = None\n self.stats_file = None\n self.enemy = None\n\n def seed(self, value):\n \"\"\" Initialize the random state of the environment to make results reproducible. \"\"\"\n random.seed(value)\n np.random.seed(value)\n\n def get_random_empty_cell(self):\n return self.field.get_random_empty_cell()\n\n @property\n def observation_shape(self):\n \"\"\" Get the shape of the state observed at each timestep. \"\"\"\n return self.field.size, self.field.size\n\n @property\n def num_actions(self):\n \"\"\" Get the number of actions the agent can take. \"\"\"\n return len(ALL_SNAKE_ACTIONS)\n\n def new_episode(self):\n \"\"\" Reset the environment and begin a new episode. 
\"\"\"\n self.field.create_level()\n self.generate_rand_wall()\n # print(self.field._cells)\n self.stats.reset()\n self.timestep_index = 0\n\n self.enemy = None\n self.fruit = []\n self.poison = []\n self.poison_num = 0\n self.snake = Snake(self.field.get_random_empty_cell(), length=self.initial_snake_length)\n self.field.place_snake(self.snake)\n self.generate_emeny()\n self.generate_poison()\n self.current_action = None\n self.is_game_over = False\n\n result = TimestepResult(\n observation=self.get_observation(),\n reward=0,\n is_episode_end=self.is_game_over\n )\n\n self.record_timestep_stats(result)\n return result\n\n def getResult(self):\n result = TimestepResult(\n observation=self.get_observation(),\n reward=0,\n is_episode_end=self.is_game_over\n )\n\n self.record_timestep_stats(result)\n return result\n\n def record_timestep_stats(self, result):\n \"\"\" Record environment statistics according to the verbosity level. \"\"\"\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n\n # Write CSV header for the stats file.\n if self.verbose >= 1 and self.stats_file is None:\n self.stats_file = open('snake-env-{timestamp}.csv', 'w')\n stats_csv_header_line = self.stats.to_dataframe()[:0].to_csv(index=None)\n # print(stats_csv_header_line, self.stats_file, '', flush=True)\n\n # Create a blank debug log file.\n # if self.verbose >= 2 and self.debug_file is None:\n # self.debug_file = open('snake-env-{timestamp}.log', 'w')\n\n self.stats.record_timestep(self.current_action, result)\n self.stats.timesteps_survived = self.timestep_index\n\n # if self.verbose >= 2:\n # print(result, self.debug_file)\n\n # # Log episode stats if the appropriate verbosity level is set.\n # if result.is_episode_end:\n # if self.verbose >= 1:\n # stats_csv_line = self.stats.to_dataframe().to_csv(header=False, index=None)\n # print(stats_csv_line, self.stats_file, '', flush=True)\n # if self.verbose >= 2:\n # print(self.stats, self.debug_file)\n\n def get_observation(self):\n \"\"\" Observe the 
state of the environment. \"\"\"\n return np.copy(self.field._cells)\n\n def choose_action(self, action):\n \"\"\" Choose the action that will be taken at the next timestep. \"\"\"\n\n self.current_action = action\n if action == SnakeAction.TURN_LEFT1:\n self.snake.turn_left()\n elif action == SnakeAction.TURN_LEFT2:\n self.snake.turn_left()\n self.snake.turn_left()\n elif action == SnakeAction.TURN_LEFT3:\n self.snake.turn_left()\n self.snake.turn_left()\n self.snake.turn_left()\n elif action == SnakeAction.TURN_RIGHT1:\n self.snake.turn_right()\n elif action == SnakeAction.TURN_RIGHT2:\n self.snake.turn_right()\n self.snake.turn_right()\n elif action == SnakeAction.TURN_RIGHT3:\n self.snake.turn_right()\n self.snake.turn_right()\n self.snake.turn_right()\n\n def create_wall(self, pos):\n # self.point(pos).type = PointType.WALL\n self.field[pos] = CellType.WALL\n\n def create_fix_wall_1(self):\n wall_pos = [Point(3, 4), Point(3, 5), Point(3, 6), Point(3, 7), Point(3, 8), Point(3, 9),\n Point(6, 3), Point(6, 4), Point(6, 5), Point(6, 8), Point(6, 9), Point(6, 10),\n Point(7, 6),\n Point(8, 5), Point(8, 8),\n Point(9, 4), Point(9, 9),\n Point(10, 3), Point(10, 5), Point(10, 6), Point(10, 7), Point(10, 8), Point(10, 10),\n Point(11, 11)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_2(self):\n wall_pos = [Point(2, 3), Point(2, 10),\n Point(3, 3), Point(3, 10),\n Point(4, 4), Point(4, 9),\n Point(5, 5), Point(5, 8),\n Point(6, 6), Point(6, 7),\n Point(7, 3), Point(7, 10),\n Point(8, 3), Point(8, 6), Point(8, 7), Point(8, 10),\n Point(9, 3), Point(9, 6), Point(9, 7), Point(9, 10),\n Point(10, 4), Point(10, 5), Point(10, 8), Point(10, 9)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_3(self):\n wall_pos = [Point(3, 2), Point(3, 3), Point(3, 8), Point(3, 9),\n Point(4, 4), Point(4, 7), Point(4, 10),\n Point(5, 4), Point(5, 7), Point(5, 10),\n Point(6, 3),\n Point(7, 2), Point(7, 7), Point(7, 10),\n Point(8, 2), Point(8, 
7), Point(8, 10),\n Point(9, 2), Point(9, 7), Point(9, 10),\n Point(10, 3), Point(10, 4), Point(10, 8), Point(10, 9)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_4(self):\n wall_pos = [Point(3, 3), Point(3, 7), Point(3, 8), Point(3, 9),\n Point(4, 3), Point(4, 6), Point(4, 10),\n Point(5, 3), Point(5, 6), Point(5, 10),\n Point(6, 8),\n Point(7, 3), Point(7, 6), Point(7, 10),\n Point(8, 3), Point(8, 6), Point(8, 10),\n Point(9, 3), Point(9, 6), Point(9, 10),\n Point(10, 3), Point(10, 7), Point(10, 8), Point(10, 9)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_5(self):\n wall_pos = [Point(1, 2), Point(1, 6), Point(1, 7), Point(1, 11),\n Point(2, 1), Point(2, 4), Point(2, 9), Point(2, 12),\n Point(3, 3), Point(3, 6), Point(3, 7), Point(3, 10),\n Point(4, 2), Point(4, 5), Point(4, 8), Point(4, 11),\n Point(5, 4), Point(5, 9),\n Point(6, 1), Point(6, 3), Point(6, 10), Point(6, 12),\n Point(7, 1), Point(7, 3), Point(7, 10), Point(7, 12),\n Point(8, 4), Point(8, 9),\n Point(9, 2), Point(9, 5), Point(9, 8), Point(9, 11),\n Point(10, 3), Point(10, 6), Point(10, 7), Point(10, 10),\n Point(11, 1), Point(11, 4), Point(11, 9), Point(11, 12),\n Point(12, 2), Point(12, 6), Point(12, 7), Point(12, 11)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_6(self):\n wall_pos = [Point(1, 3), Point(1, 6), Point(1, 9),\n Point(2, 2), Point(2, 5), Point(2, 8), Point(2, 11),\n Point(3, 1), Point(3, 4), Point(3, 7), Point(3, 10), Point(3, 12),\n Point(4, 3), Point(4, 6), Point(4, 9),\n Point(5, 2), Point(5, 8), Point(5, 11),\n Point(6, 1), Point(6, 4), Point(6, 12),\n Point(7, 3), Point(7, 10),\n Point(8, 2), Point(8, 5), Point(8, 8), Point(8, 11),\n Point(9, 1), Point(9, 4), Point(9, 6), Point(9, 9), Point(9, 12),\n Point(10, 3), Point(10, 7), Point(10, 10),\n Point(11, 2), Point(11, 5), Point(11, 8), Point(11, 11),\n Point(12, 3), Point(12, 6), Point(12, 9), Point(12, 12)]\n for pos in wall_pos:\n 
self.create_wall(pos)\n\n def create_fix_wall_7(self):\n wall_pos = [Point(2, 2), Point(2, 11),\n Point(3, 3), Point(3, 4), Point(3, 5), Point(3, 6), Point(3, 7), Point(3, 8), Point(3, 9),\n Point(3, 10),\n Point(5, 3), Point(5, 10),\n Point(6, 3), Point(6, 10),\n Point(7, 3), Point(7, 10),\n Point(8, 3), Point(8, 10),\n Point(10, 3), Point(10, 4), Point(10, 5), Point(10, 6), Point(10, 7), Point(10, 8), Point(10, 9),\n Point(10, 10),\n Point(11, 2), Point(11, 11)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_8(self):\n wall_pos = [Point(1, 3), Point(1, 4), Point(1, 9), Point(1, 10),\n Point(2, 3), Point(2, 4), Point(2, 9), Point(2, 10),\n Point(3, 3), Point(3, 4), Point(3, 9), Point(3, 10),\n Point(6, 1), Point(6, 2), Point(6, 3), Point(6, 4), Point(6, 9), Point(6, 10), Point(6, 11),\n Point(6, 12),\n Point(7, 1), Point(7, 2), Point(7, 3), Point(7, 4), Point(7, 9), Point(7, 10), Point(7, 11),\n Point(7, 12),\n Point(10, 3), Point(10, 4), Point(10, 9), Point(10, 10),\n Point(11, 3), Point(11, 4), Point(11, 9), Point(11, 10),\n Point(12, 3), Point(12, 4), Point(12, 9), Point(12, 10)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def create_fix_wall_9(self):\n wall_pos = [Point(3, 5), Point(3, 6),\n Point(4, 4), Point(4, 10),\n Point(5, 4), Point(5, 5), Point(5, 7), Point(5, 8), Point(5, 9),\n Point(6, 4), Point(6, 9),\n Point(7, 3),\n Point(8, 5), Point(8, 6), Point(8, 7), Point(8, 8),\n Point(9, 2), Point(9, 6),\n Point(10, 3), Point(10, 4), Point(10, 6), Point(10, 10),\n Point(11, 2), Point(11, 9), Point(11, 11),\n Point(12, 10)]\n for pos in wall_pos:\n self.create_wall(pos)\n\n def generate_rand_wall(self):\n fixnum = np.random.uniform()\n if fixnum < 0.5:\n randomnum = np.random.randint(1, 9)\n funlist = {1: self.create_fix_wall_1, 2: self.create_fix_wall_2, 3: self.create_fix_wall_3,\n 4: self.create_fix_wall_4, 5: self.create_fix_wall_5, 6: self.create_fix_wall_6,\n 7: self.create_fix_wall_7, 8: self.create_fix_wall_8, 9: 
self.create_fix_wall_9}\n funlist[randomnum]()\n return\n self.generate_wall()\n\n # empty_pos = []\n # for i in range(1, self._num_rows - 1):\n # for j in range(1, self._num_cols - 1):\n # t = self._content[i][j].type\n # if t == PointType.EMPTY:\n # empty_pos.append(Pos(i, j))\n\n # empty_pos = self.field.get_empty_cell()\n # wall num\n # wallNum = np.random.randint(10, 50)\n # wall4rate = np.random.uniform()\n # h_pos = None\n # if wall4rate < 0.5:\n # if empty_pos:\n # h_pos = random.choice(empty_pos)\n # w_pos1 = h_pos.adj(Direc.LEFT)\n # w_pos2 = h_pos.adj(Direc.UP)\n # w_pos3 = h_pos.adj(Direc.RIGHT)\n # w_pos4 = h_pos.adj(Direc.DOWN)\n # for pos in [w_pos1, w_pos2, w_pos3, w_pos4]:\n # if pos in empty_pos:\n # self.create_wall(pos)\n # empty_pos.remove(pos)\n # wallNum -= 1\n\n # while wallNum > 0:\n # w_pos = random.choice(empty_pos)\n # if h_pos != w_pos:\n # self.create_wall(w_pos)\n # empty_pos.remove(w_pos)\n # wallNum -= 1\n\n def generate_wall(self):\n # emptyNum = len(self.field._empty_cells)\n randnum = np.random.randint(10, 60)\n i=0\n while(i<randnum):\n pos = random.choice(self.field.get_empty_cell())\n i+=1\n self.field[pos] = CellType.WALL\n\n def generate_emeny(self, position=None):\n \"\"\" Generate a new fruit at a random unoccupied cell. 
\"\"\"\n if position is None:\n position = self.field.get_random_empty_cell()\n self.enemy = position\n self.field[position] = CellType.SNAKE_BODY\n if np.random.random() > 0.2:\n if (self.field[position + SnakeDirection.NORTH] == CellType.EMPTY):\n self.field[position + SnakeDirection.NORTH] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.NORTH)\n if (self.field[position + SnakeDirection.SOUTH] == CellType.EMPTY):\n self.field[position + SnakeDirection.SOUTH] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.SOUTH)\n if (self.field[position + SnakeDirection.WEST] == CellType.EMPTY):\n self.field[position + SnakeDirection.WEST] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.WEST)\n if (self.field[position + SnakeDirection.EAST] == CellType.EMPTY):\n self.field[position + SnakeDirection.EAST] = CellType.FRUIT\n self.fruit.append(position + SnakeDirection.EAST)\n if np.random.random() < 0.1:\n position = self.field.get_random_empty_cell()\n self.field[position] = CellType.FRUIT\n self.fruit.append(position)\n if np.random.random() < 0.1:\n position = self.field.get_random_empty_cell()\n self.field[position] = CellType.FRUIT\n self.fruit.append(position)\n\n def generate_snake(self, snake=None):\n \"\"\" Generate a new fruit at a random unoccupied cell. \"\"\"\n self.snake = snake\n self.field.place_snake(self.snake)\n\n def generate_poison(self):\n \"\"\" Generate a new fruit at a random unoccupied cell. \"\"\"\n if np.random.random() < 0:\n self.poison_num = random.Random().choice([1, 2, 3])\n for position in self.field.get_empty_cell():\n if (0 < position.x <= self.poison_num or 0 < position.y <= self.poison_num or (\n position.x + self.poison_num) >= (self.field.size - 1) or (position.y + self.poison_num) >= (\n self.field.size - 1)):\n self.field[position] = CellType.POISON\n self.poison.append(position)\n\n def be_poison(self, position):\n \"\"\" Generate a new fruit at a random unoccupied cell. 
\"\"\"\n # if np.random.random() < 1:\n if (0 < position.x <= self.poison_num or 0 < position.y <= self.poison_num or (\n position.x + self.poison_num) >= (self.field.size - 1) or (position.y + self.poison_num) >= (\n self.field.size - 1)):\n return True\n return False\n\n def timestep(self):\n \"\"\" Execute the timestep and return the new observable state. \"\"\"\n\n self.timestep_index += 1\n reward = 0\n isdie = False\n old_head = self.snake.head\n old_tail = self.snake.tail\n\n # Are we about to eat the fruit?\n if self.fruit.__contains__(self.snake.peek_next_move()):\n self.fruit.remove(self.snake.peek_next_move())\n # self.generate_fruit()\n # old_tail = None\n reward += self.rewards['ate_fruit']\n self.stats.fruits_eaten += 1\n elif self.be_poison(self.snake.peek_next_move()):\n self.stats.poisons_eaten += 1\n # If not, just move forward.\n\n self.snake.move()\n\n self.field.update_snake_footprint(old_head, old_tail, self.snake.head)\n\n # Hit a wall or own body?\n if not self.is_alive():\n # reward -=self.fruit.__len__()\n if self.has_hit_wall() or self.has_hit_own_body():\n self.stats.termination_reason = 'hit_wall'\n reward -= 0.7\n isdie = True\n self.field[self.snake.head] = CellType.SNAKE_HEAD\n self.is_game_over = True\n # reward *= 0.7\n # print(self.fruit.__len__())\n # if(self.get_wall_num(old_head) >= 2) and self.fruit.__len__()<=1:\n # reward = self.get_wall_num(old_head) - self.fruit.__len__()\n # else:\n # reward = -1\n reward += (self.get_wall_num(old_head) - 1.5)\n if self.snake.length == 2 or self.snake.length == 1:\n reward -= 2\n\n if self.stats.poisons_eaten != 0:\n reward -= 2\n\n if (self.be_poison(old_head)):\n reward -= 1\n\n # reward += 0.99\n # Exceeded the limit of moves?\n if self.timestep_index >= self.max_step_limit:\n self.is_game_over = True\n self.stats.termination_reason = 'timestep_limit_exceeded'\n\n result = TimestepResult(\n observation=self.get_observation(),\n reward=reward,\n is_episode_end=self.is_game_over\n )\n\n 
self.record_timestep_stats(result)\n return result\n\n def get_wall_num(self, position=None):\n num = 0\n if self.field[position + SnakeDirection.NORTH] == CellType.WALL:\n num += 1\n if self.field[position + SnakeDirection.SOUTH] == CellType.WALL:\n num += 1\n if self.field[position + SnakeDirection.WEST] == CellType.WALL:\n num += 1\n if self.field[position + SnakeDirection.EAST] == CellType.WALL:\n num += 1\n if self.field[\n position + SnakeDirection.NORTH] == CellType.POISON:\n num += 0.5\n if self.field[\n position + SnakeDirection.SOUTH] == CellType.POISON:\n num += 0.5\n if self.field[\n position + SnakeDirection.WEST] == CellType.POISON:\n num += 0.5\n if self.field[\n position + SnakeDirection.EAST] == CellType.POISON:\n num += 0.5\n return num\n\n def generate_fruit(self, position=None):\n \"\"\" Generate a new fruit at a random unoccupied cell. \"\"\"\n if position is None:\n position = self.field.get_random_empty_cell()\n self.field[position] = CellType.FRUIT\n self.fruit.append(position)\n\n def has_hit_wall(self):\n \"\"\" True if the snake has hit a wall, False otherwise. \"\"\"\n return self.field[self.snake.head] == CellType.WALL\n\n def has_hit_own_body(self):\n \"\"\" True if the snake has hit its own body, False otherwise. \"\"\"\n return self.field[self.snake.head] == CellType.SNAKE_BODY\n\n def is_alive(self):\n \"\"\" True if the snake is still alive, False otherwise. \"\"\"\n return not self.has_hit_wall() and not self.has_hit_own_body()\n\n\nclass TimestepResult(object):\n \"\"\" Represents the information provided to the agent after each timestep. 
\"\"\"\n\n def __init__(self, observation, reward, is_episode_end):\n self.observation = observation\n self.reward = reward\n self.is_episode_end = is_episode_end\n\n def __str__(self):\n field_map = '\\n'.join([\n ''.join(str(cell) for cell in row)\n for row in self.observation\n ])\n return '{field_map}\\nR = {self.reward} {self.is_episode_end}\\n'\n\n\nclass EpisodeStatistics(object):\n \"\"\" Represents the summary of the agent's performance during the episode. \"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n \"\"\" Forget all previous statistics and prepare for a new episode. \"\"\"\n self.timesteps_survived = 0\n self.sum_episode_rewards = 0\n self.fruits_eaten = 0\n self.poisons_eaten = 0\n self.termination_reason = None\n self.action_counter = {\n action: 0\n for action in ALL_SNAKE_ACTIONS\n }\n\n def record_timestep(self, action, result):\n \"\"\" Update the stats based on the current timestep results. \"\"\"\n self.sum_episode_rewards += result.reward\n if action is not None:\n self.action_counter[action] += 1\n\n def flatten(self):\n \"\"\" Format all episode statistics as a flat object. \"\"\"\n flat_stats = {\n 'timesteps_survived': self.timesteps_survived,\n 'sum_episode_rewards': self.sum_episode_rewards,\n 'mean_reward': self.sum_episode_rewards / self.timesteps_survived if self.timesteps_survived else None,\n 'fruits_eaten': self.fruits_eaten,\n 'termination_reason': self.termination_reason,\n }\n flat_stats.update({\n 'action_counter_{action}': self.action_counter.get(action, 0)\n for action in ALL_SNAKE_ACTIONS\n })\n return flat_stats\n\n def to_dataframe(self):\n \"\"\" Convert the episode statistics to a Pandas data frame. \"\"\"\n return pd.DataFrame([self.flatten()])\n\n def __str__(self):\n return pprint.pformat(self.flatten())\n"
] | [
[
"numpy.random.random",
"numpy.random.seed",
"numpy.copy",
"numpy.random.uniform",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KennyKangMPC/chebpy | [
"5ad603b15f90a0f36093f1705e3e08d090330cef",
"5ad603b15f90a0f36093f1705e3e08d090330cef"
] | [
"tests/test_bndfun.py",
"chebpy/core/settings.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Unit-tests for pyfun/core/bndfun.py\"\"\"\n\nfrom __future__ import division\n\nimport itertools\nimport operator\nimport unittest\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom chebpy.core.bndfun import Bndfun\nfrom chebpy.core.chebtech import Chebtech2\nfrom chebpy.core.settings import DefaultPrefs\nfrom chebpy.core.utilities import Interval\nfrom chebpy.core.algorithms import standard_chop\n\nfrom tests.utilities import testfunctions, infnorm\n\n# aliases\npi = np.pi\nsin = np.sin\ncos = np.cos\nexp = np.exp\neps = DefaultPrefs.eps\n\n# NOTE: since (Fun/ClassicFun/)Bndfun is not a user-facing class (although it\n# is not abstract) we will test the interface in the way Chebfun will interact\n# with it, which means working explcitly with Interval objects. Furthermore,\n# since we have already tested the adaptive constructor in the Chebtech-level\n# tests, we just use the adaptive constructor in these tests.\n\nclass ClassUsage(unittest.TestCase):\n \"\"\"Unit-tests for miscelaneous Bndfun class usage\"\"\"\n\n def setUp(self):\n f = lambda x: sin(30*x)\n subinterval = Interval(-2,3)\n self.f = f\n self.ff = Bndfun.initfun_adaptive(f, subinterval)\n self.xx = subinterval(np.linspace(-1,1,100))\n self.emptyfun = Bndfun(Chebtech2.initempty(), subinterval)\n self.constfun = Bndfun(Chebtech2.initconst(1.), subinterval)\n\n # tests for emptiness of Bndfun objects\n def test_isempty_True(self):\n self.assertTrue(self.emptyfun.isempty)\n self.assertFalse(not self.emptyfun.isempty)\n\n def test_isempty_False(self):\n self.assertFalse(self.constfun.isempty)\n self.assertTrue(not self.constfun.isempty)\n\n # tests for constantness of Bndfun objects\n def test_isconst_True(self):\n self.assertTrue(self.constfun.isconst)\n self.assertFalse(not self.constfun.isconst)\n\n def test_isconst_False(self):\n self.assertFalse(self.emptyfun.isconst)\n self.assertTrue(not self.emptyfun.isconst)\n\n # check the size() method is working 
properly\n def test_size(self):\n cfs = np.random.rand(10)\n subinterval = Interval()\n b0 = Bndfun(Chebtech2(np.array([])), subinterval)\n b1 = Bndfun(Chebtech2(np.array([1.])), subinterval)\n b2 = Bndfun(Chebtech2(cfs), subinterval)\n self.assertEquals(b0.size, 0)\n self.assertEquals(b1.size, 1)\n self.assertEquals(b2.size, cfs.size)\n\n def test_support(self):\n a, b = self.ff.support\n self.assertEqual(a, -2)\n self.assertEqual(b, 3)\n\n def test_endvalues(self):\n a, b = self.ff.support\n fa, fb = self.ff.endvalues\n self.assertLessEqual(abs(fa-self.f(a)), 2e1*eps)\n self.assertLessEqual(abs(fb-self.f(b)), 2e1*eps)\n\n # test the different permutations of self(xx, ..)\n def test_call(self):\n self.ff(self.xx)\n\n def test_call_bary(self):\n self.ff(self.xx, \"bary\")\n self.ff(self.xx, how=\"bary\")\n\n def test_call_clenshaw(self):\n self.ff(self.xx, \"clenshaw\")\n self.ff(self.xx, how=\"clenshaw\")\n\n def test_call_bary_vs_clenshaw(self):\n b = self.ff(self.xx, \"clenshaw\")\n c = self.ff(self.xx, \"bary\")\n self.assertLessEqual(infnorm(b-c), 2e2*eps)\n\n def test_call_raises(self):\n self.assertRaises(ValueError, self.ff, self.xx, \"notamethod\")\n self.assertRaises(ValueError, self.ff, self.xx, how=\"notamethod\")\n\n def test_vscale_empty(self):\n self.assertEquals(self.emptyfun.vscale, 0.)\n\n def test_copy(self):\n ff = self.ff\n gg = self.ff.copy()\n self.assertEquals(ff, ff)\n self.assertEquals(gg, gg)\n self.assertNotEquals(ff, gg)\n self.assertEquals(infnorm(ff.coeffs-gg.coeffs), 0)\n\n # check that the restricted fun matches self on the subinterval\n def test_restrict(self):\n i1 = Interval(-1,1)\n gg = self.ff.restrict(i1)\n yy = np.linspace(-1,1,1000)\n self.assertLessEqual(infnorm(self.ff(yy)-gg(yy)), 1e2*eps)\n\n # check that the restricted fun matches self on the subinterval\n def test_simplify(self):\n interval = Interval(-2,1)\n ff = Bndfun.initfun_fixedlen(self.f, interval, 1000)\n gg = ff.simplify()\n self.assertEqual(gg.size, 
standard_chop(ff.onefun.coeffs))\n self.assertEqual(infnorm(ff.coeffs[:gg.size]-gg.coeffs), 0)\n self.assertEqual(ff.interval, gg.interval)\n# --------------------------------------\n# vscale estimates\n# --------------------------------------\nvscales = [\n # (function, number of points, vscale)\n (lambda x: sin(4*pi*x), [-2, 2], 1),\n (lambda x: cos(x), [-10, 1], 1),\n (lambda x: cos(4*pi*x), [-100, 100], 1),\n (lambda x: exp(cos(4*pi*x)), [-1,1], exp(1)),\n (lambda x: cos(3244*x), [-2,0], 1),\n (lambda x: exp(x), [-1,2], exp(2)),\n (lambda x: 1e10*exp(x), [-1,1], 1e10*exp(1)),\n (lambda x: 0*x+1., [-1e5,1e4], 1),\n]\n\ndef definiteIntegralTester(fun, interval, vscale):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n def tester(self):\n absdiff = abs(ff.vscale-vscale)\n self.assertLessEqual(absdiff, .1*vscale)\n return tester\n\nfor k, args in enumerate(vscales):\n _testfun_ = definiteIntegralTester(*args)\n _testfun_.__name__ = \"test_vscale_{:02}\".format(k)\n setattr(ClassUsage, _testfun_.__name__, _testfun_)\n\n\nclass Plotting(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun plotting methods\"\"\"\n\n def setUp(self):\n f = lambda x: sin(1*x) + 5e-1*cos(10*x) + 5e-3*sin(100*x)\n subinterval = Interval(-6, 10)\n self.f0 = Bndfun.initfun_fixedlen(f, subinterval, 1000)\n self.f1 = Bndfun.initfun_adaptive(f, subinterval)\n\n def test_plot(self):\n fig, ax = plt.subplots()\n self.f0.plot(ax=ax, color=\"g\", marker=\"o\", markersize=2, linestyle=\"\")\n\n def test_plotcoeffs(self):\n fig, ax = plt.subplots()\n self.f0.plotcoeffs(ax=ax)\n self.f1.plotcoeffs(ax=ax, color=\"r\")\n\n\n\nclass Calculus(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun calculus operations\"\"\"\n\n def setUp(self):\n self.emptyfun = Bndfun(Chebtech2.initempty(), Interval())\n self.yy = np.linspace(-1,1,2000)\n# self.constfun = Bndfun(Chebtech2.initconst(1.), subinterval)\n\n # tests for the correct results in the empty cases\n def 
test_sum_empty(self):\n self.assertEqual(self.emptyfun.sum(), 0)\n\n def test_cumsum_empty(self):\n self.assertTrue(self.emptyfun.cumsum().isempty)\n\n def test_diff_empty(self):\n self.assertTrue(self.emptyfun.diff().isempty)\n\n# --------------------------------------\n# definite integrals\n# --------------------------------------\ndef_integrals = [\n # (function, interval, integral, tolerance)\n (lambda x: sin(x), [-2,2], .0, 2*eps),\n (lambda x: sin(4*pi*x), [-.1, .7], 0.088970317927147, 1e1*eps),\n (lambda x: cos(x), [-100,203], 0.426944059057085, 4e2*eps),\n (lambda x: cos(4*pi*x), [-1e-1,-1e-3], 0.074682699182803, 2*eps),\n (lambda x: exp(cos(4*pi*x)), [-3,1], 5.064263511008033, 4*eps),\n (lambda x: cos(3244*x), [0,0.4], -3.758628487169980e-05, 5e2*eps),\n (lambda x: exp(x), [-2,-1], exp(-1)-exp(-2), 2*eps),\n (lambda x: 1e10*exp(x), [-1,2], 1e10*(exp(2)-exp(-1)), 2e10*eps),\n (lambda x: 0*x+1., [-100,300], 400, eps),\n]\n\ndef definiteIntegralTester(fun, interval, integral, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n def tester(self):\n absdiff = abs(ff.sum()-integral)\n self.assertLessEqual(absdiff, tol)\n return tester\n\nfor k, (fun, n, integral, tol) in enumerate(def_integrals):\n _testfun_ = definiteIntegralTester(fun, n, integral, tol)\n _testfun_.__name__ = \"test_sum_{:02}\".format(k)\n setattr(Calculus, _testfun_.__name__, _testfun_)\n\n# --------------------------------------\n# indefinite integrals\n# --------------------------------------\nindef_integrals = [\n # (function, indefinite integral, interval, tolerance)\n (lambda x: 0*x+1., lambda x: x, [-2,3], eps),\n (lambda x: x, lambda x: 1/2*x**2, [-5,0], 4*eps),\n (lambda x: x**2, lambda x: 1/3*x**3, [1,10], 2e2*eps),\n (lambda x: x**3, lambda x: 1/4*x**4, [-1e-2,4e-1], 2*eps),\n (lambda x: x**4, lambda x: 1/5*x**5, [-3,-2], 3e2*eps),\n (lambda x: x**5, lambda x: 1/6*x**6, [-1e-10,1], 4*eps),\n (lambda x: sin(x), lambda x: -cos(x), [-10,22], 
3e1*eps),\n (lambda x: cos(3*x), lambda x: 1./3*sin(3*x), [-3,4], 2*eps),\n (lambda x: exp(x), lambda x: exp(x), [-60,1], 1e1*eps),\n (lambda x: 1e10*exp(x), lambda x: 1e10*exp(x), [-1,1], 1e10*(3*eps)),\n]\n\ndef indefiniteIntegralTester(fun, ifn, interval, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n gg = Bndfun.initfun_fixedlen(ifn, subinterval, ff.size+1)\n coeffs = gg.coeffs\n coeffs[0] = coeffs[0] - ifn(np.array([interval[0]]))\n def tester(self):\n absdiff = infnorm(ff.cumsum().coeffs - coeffs)\n self.assertLessEqual(absdiff, tol)\n return tester\n\nfor k, (fun, dfn, n, tol) in enumerate(indef_integrals):\n _testfun_ = indefiniteIntegralTester(fun, dfn, n, tol)\n _testfun_.__name__ = \"test_cumsum_{:02}\".format(k)\n setattr(Calculus, _testfun_.__name__, _testfun_)\n\n# --------------------------------------\n# derivatives\n# --------------------------------------\nderivatives = [\n# (function, derivative, number of points, tolerance)\n (lambda x: 0*x+1., lambda x: 0*x+0, [-2,3], eps),\n (lambda x: x, lambda x: 0*x+1, [-5,0], 2e1*eps),\n (lambda x: x**2, lambda x: 2*x, [1,10], 2e2*eps),\n (lambda x: x**3, lambda x: 3*x**2, [-1e-2,4e-1], 3*eps),\n (lambda x: x**4, lambda x: 4*x**3, [-3,-2], 1e3*eps),\n (lambda x: x**5, lambda x: 5*x**4, [-1e-10,1], 4e1*eps),\n (lambda x: sin(x), lambda x: cos(x), [-10,22], 5e2*eps),\n (lambda x: cos(3*x), lambda x: -3*sin(3*x), [-3,4], 5e2*eps),\n (lambda x: exp(x), lambda x: exp(x), [-60,1], 2e2*eps),\n (lambda x: 1e10*exp(x), lambda x: 1e10*exp(x), [-1,1], 1e10*2e2*eps),\n]\n\ndef derivativeTester(fun, ifn, interval, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n gg = Bndfun.initfun_fixedlen(ifn, subinterval, max(ff.size-1,1))\n def tester(self):\n absdiff = infnorm(ff.diff().coeffs - gg.coeffs)\n self.assertLessEqual(absdiff, tol)\n return tester\n\nfor k, (fun, der, n, tol) in enumerate(derivatives):\n _testfun_ = 
derivativeTester(fun, der, n, tol)\n _testfun_.__name__ = \"test_diff_{:02}\".format(k)\n setattr(Calculus, _testfun_.__name__, _testfun_)\n\n\nclass Construction(unittest.TestCase):\n \"\"\"Unit-tests for construction of Bndfun objects\"\"\"\n\n def test_onefun_construction(self):\n coeffs = np.random.rand(10)\n subinterval = Interval()\n onefun = Chebtech2(coeffs)\n f = Bndfun(onefun, subinterval)\n self.assertIsInstance(f, Bndfun)\n self.assertLess(infnorm(f.coeffs-coeffs), eps)\n\n def test_const_construction(self):\n subinterval = Interval()\n ff = Bndfun.initconst(1., subinterval)\n self.assertEquals(ff.size, 1)\n self.assertTrue(ff.isconst)\n self.assertFalse(ff.isempty)\n self.assertRaises(ValueError, Bndfun.initconst, [1.], subinterval)\n\n def test_empty_construction(self):\n ff = Bndfun.initempty()\n self.assertEquals(ff.size, 0)\n self.assertFalse(ff.isconst)\n self.assertTrue(ff.isempty)\n self.assertRaises(TypeError, Bndfun.initempty, [1.])\n\n def test_identity_construction(self):\n for (a,b) in [(-1,1), (-10,-2), (-2.3, 1.24), (20,2000)]:\n itvl = Interval(a,b)\n ff = Bndfun.initidentity(itvl)\n self.assertEquals(ff.size, 2)\n xx = np.linspace(a,b,1001)\n tol = eps * abs(itvl).max()\n self.assertLessEqual(infnorm(ff(xx)-xx), tol)\n\ndef adaptiveTester(fun, subinterval, funlen):\n ff = Bndfun.initfun_adaptive(fun, subinterval)\n def tester(self):\n self.assertEquals(ff.size, funlen)\n return tester\n\ndef fixedlenTester(fun, subinterval, n):\n ff = Bndfun.initfun_fixedlen(fun, subinterval, n)\n def tester(self):\n self.assertEquals(ff.size, n)\n return tester\n\nfuns = []\nfun_details = [\n # (function, name for the test printouts,\n # Matlab chebfun adaptive degree on [-2,3])\n (lambda x: x**3 + x**2 + x + 1, \"poly3(x)\", [-2,3], 4),\n (lambda x: exp(x), \"exp(x)\", [-2,3], 20),\n (lambda x: sin(x), \"sin(x)\", [-2,3], 20),\n (lambda x: cos(20*x), \"cos(20x)\", [-2,3], 90),\n (lambda x: 0.*x+1., \"constfun\", [-2,3], 1),\n (lambda x: 0.*x, 
\"zerofun\", [-2,3], 1),\n]\n\nfor k, (fun, name, interval, funlen) in enumerate(fun_details):\n\n fun.__name__ = name\n subinterval = Interval(*interval)\n\n # add the adaptive tests\n _testfun_ = adaptiveTester(fun, subinterval, funlen)\n _testfun_.__name__ = \"test_adaptive_{}\".format(fun.__name__)\n setattr(Construction, _testfun_.__name__, _testfun_)\n\n # add the fixedlen tests\n for n in np.array([100]):\n _testfun_ = fixedlenTester(fun, subinterval, n)\n _testfun_.__name__ = \\\n \"test_fixedlen_{}_{:003}pts\".format(fun.__name__, n)\n setattr(Construction, _testfun_.__name__, _testfun_)\n\n\nclass Algebra(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun algebraic operations\"\"\"\n def setUp(self):\n self.yy = np.linspace(-1,1,1000)\n self.emptyfun = Bndfun.initempty()\n\n # check (empty Bndfun) + (Bndfun) = (empty Bndfun)\n # and (Bndfun) + (empty Bndfun) = (empty Bndfun)\n def test__add__radd__empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n chebtech = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue((self.emptyfun+chebtech).isempty)\n self.assertTrue((chebtech+self.emptyfun).isempty)\n\n # check the output of (constant + Bndfun)\n # and (Bndfun + constant)\n def test__add__radd__constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, _) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n f = lambda x: const + fun(x)\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n f1 = const + bndfun\n f2 = bndfun + const\n tol = 4e1 * eps * abs(const)\n self.assertLessEqual(infnorm(f(xx)-f1(xx)), tol)\n self.assertLessEqual(infnorm(f(xx)-f2(xx)), tol)\n\n # check (empty Bndfun) - (Bndfun) = (empty Bndfun)\n # and (Bndfun) - (empty Bndfun) = (empty Bndfun)\n def test__sub__rsub__empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n chebtech = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue((self.emptyfun-chebtech).isempty)\n 
self.assertTrue((chebtech-self.emptyfun).isempty)\n\n # check the output of constant - Bndfun\n # and Bndfun - constant\n def test__sub__rsub__constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, _) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n f = lambda x: const - fun(x)\n g = lambda x: fun(x) - const\n ff = const - bndfun\n gg = bndfun - const\n tol = 5e1 * eps * abs(const)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n self.assertLessEqual(infnorm(g(xx)-gg(xx)), tol)\n\n # check (empty Bndfun) * (Bndfun) = (empty Bndfun)\n # and (Bndfun) * (empty Bndfun) = (empty Bndfun)\n def test__mul__rmul__empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n chebtech = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue((self.emptyfun*chebtech).isempty)\n self.assertTrue((chebtech*self.emptyfun).isempty)\n\n # check the output of constant * Bndfun\n # and Bndfun * constant\n def test__mul__rmul__constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, _) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n f = lambda x: const * fun(x)\n g = lambda x: fun(x) * const\n ff = const * bndfun\n gg = bndfun * const\n tol = 4e1 * eps * abs(const)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n self.assertLessEqual(infnorm(g(xx)-gg(xx)), tol)\n\n # check (empty Bndfun) / (Bndfun) = (empty Bndfun)\n # and (Bndfun) / (empty Bndfun) = (empty Bndfun)\n def test_truediv_empty(self):\n subinterval = Interval(-2,3)\n for (fun, _, _) in testfunctions:\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n self.assertTrue(operator.truediv(self.emptyfun, bndfun).isempty)\n self.assertTrue(operator.truediv(self.emptyfun, bndfun).isempty)\n # __truediv__\n self.assertTrue((self.emptyfun/bndfun).isempty)\n self.assertTrue((bndfun/self.emptyfun).isempty)\n\n # 
check the output of constant / Bndfun\n # and Bndfun / constant\n def test_truediv_constant(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for (fun, _, hasRoots) in testfunctions:\n for const in (-1, 1, 10, -1e5):\n hscl = abs(subinterval).max()\n tol = hscl * eps * abs(const)\n bndfun = Bndfun.initfun_adaptive(fun, subinterval)\n g = lambda x: fun(x) / const\n gg = bndfun / const\n self.assertLessEqual(infnorm(g(xx)-gg(xx)), 3*gg.size*tol)\n # don't do the following test for functions with roots\n if not hasRoots:\n f = lambda x: const / fun(x)\n ff = const / bndfun\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), 2*ff.size*tol)\n\n # check +(empty Bndfun) = (empty Bndfun)\n def test__pos__empty(self):\n self.assertTrue((+self.emptyfun).isempty)\n\n # check -(empty Bndfun) = (empty Bndfun)\n def test__neg__empty(self):\n self.assertTrue((-self.emptyfun).isempty)\n\n # check (empty Bndfun) ** c = (empty Bndfun)\n def test_pow_empty(self):\n for c in range(10):\n self.assertTrue((self.emptyfun**c).isempty)\n\n # check c ** (empty Bndfun) = (empty Bndfun)\n def test_rpow_empty(self):\n for c in range(10):\n self.assertTrue((c**self.emptyfun).isempty)\n\n # check the output of Bndfun ** constant\n def test_pow_const(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for func in (np.sin, np.exp, np.cos):\n for c in (1, 2):\n f = lambda x: func(x) ** c\n ff = Bndfun.initfun_adaptive(func, subinterval) ** c\n tol = 2e1 * eps * abs(c)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n\n # check the output of constant ** Bndfun\n def test_rpow_const(self):\n subinterval = Interval(-.5,.9)\n xx = subinterval(self.yy)\n for func in (np.sin, np.exp, np.cos):\n for c in (1, 2):\n f = lambda x: c ** func(x)\n ff = c ** Bndfun.initfun_adaptive(func, subinterval)\n tol = 1e1 * eps * abs(c)\n self.assertLessEqual(infnorm(f(xx)-ff(xx)), tol)\n\nbinops = (operator.add, operator.mul, operator.sub, operator.truediv)\n\n# add tests for the 
binary operators\ndef binaryOpTester(f, g, subinterval, binop):\n ff = Bndfun.initfun_adaptive(f, subinterval)\n gg = Bndfun.initfun_adaptive(g, subinterval)\n FG = lambda x: binop(f(x),g(x))\n fg = binop(ff, gg)\n def tester(self):\n vscl = max([ff.vscale, gg.vscale])\n lscl = max([ff.size, gg.size])\n xx = subinterval(self.yy)\n self.assertLessEqual(infnorm(fg(xx)-FG(xx)), 6*vscl*lscl*eps)\n return tester\n\n# Note: defining __radd__(a,b) = __add__(b,a) and feeding this into the\n# test will not in fact test the __radd__ functionality of the class.\n# These tests will need to be added manually.\n\nsubintervals = (\n Interval(-.5,.9),\n Interval(-1.2, 1.3),\n Interval(-2.2, -1.9),\n Interval(0.4, 1.3),\n)\n\nfor binop in binops:\n # add the generic binary operator tests\n for (f, _, _), (g, _, denomRoots) in \\\n itertools.combinations(testfunctions, 2):\n for subinterval in subintervals:\n if binop is operator.truediv and denomRoots:\n # skip truediv test if denominator has roots on the real line\n pass\n else:\n _testfun_ = binaryOpTester(f, g, subinterval, binop)\n a, b = subinterval\n _testfun_.__name__ = \\\n \"test_{}_{}_{}_[{:.1f},{:.1f}]\".format(\n binop.__name__, f.__name__, g.__name__, a, b)\n setattr(Algebra, _testfun_.__name__, _testfun_)\n\npowtestfuns = (\n [(np.exp, 'exp'), (np.sin, 'sin')],\n [(np.exp, 'exp'), (lambda x: 2-x, 'linear')],\n [(lambda x: 2-x, 'linear'), (np.exp, 'exp')],\n)\n# add operator.power tests\nfor (f, namef), (g, nameg) in powtestfuns:\n for subinterval in subintervals:\n _testfun_ = binaryOpTester(f, g, subinterval, operator.pow)\n a, b = subinterval\n _testfun_.__name__ = \\\n \"test_{}_{}_{}_[{:.1f},{:.1f}]\".format(\n 'pow', namef, nameg, a, b)\n setattr(Algebra, _testfun_.__name__, _testfun_)\n\nunaryops = (operator.pos, operator.neg)\n\n# add tests for the unary operators\ndef unaryOpTester(unaryop, f, subinterval):\n ff = Bndfun.initfun_adaptive(f, subinterval)\n gg = lambda x: unaryop(f(x))\n GG = unaryop(ff)\n def 
tester(self):\n xx = subinterval(self.yy)\n self.assertLessEqual(infnorm(gg(xx)-GG(xx)), 4e1*eps)\n return tester\n\nfor unaryop in unaryops:\n for (f, _, _) in testfunctions:\n subinterval = Interval(-.5,.9)\n _testfun_ = unaryOpTester(unaryop, f, subinterval)\n _testfun_.__name__ = \\\n \"test_{}_{}\".format(unaryop.__name__, f.__name__)\n setattr(Algebra, _testfun_.__name__, _testfun_)\n\n\nclass Ufuncs(unittest.TestCase):\n \"\"\"Unit-tests for Bndfun numpy ufunc overloads\"\"\"\n def setUp(self):\n self.yy = np.linspace(-1,1,1000)\n self.emptyfun = Bndfun.initempty()\n\nufuncs = (np.absolute, np.arccos, np.arccosh, np.arcsin, np.arcsinh, np.arctan,\n np.arctanh, np.cos, np.cosh, np.exp, np.exp2, np.expm1, np.log,\n np.log2, np.log10, np.log1p, np.sinh, np.sin, np.tan, np.tanh,\n np.sqrt)\n\n# empty-case tests\ndef ufuncEmptyCaseTester(ufunc):\n def tester(self):\n self.assertTrue(getattr(self.emptyfun, ufunc.__name__)().isempty)\n return tester\n\nfor ufunc in ufuncs:\n _testfun_ = ufuncEmptyCaseTester(ufunc)\n _testfun_.__name__ = \"test_emptycase_{}\".format(ufunc.__name__)\n setattr(Ufuncs, _testfun_.__name__, _testfun_)\n\n# TODO: Add more test cases\n# add ufunc tests:\n# (ufunc, [([fun1, interval1], tol1), ([fun2, interval2], tol2), ... 
])\n\nuf1 = lambda x: x\nuf1.__name__ = \"x\"\nuf2 = lambda x: sin(x-.5)\nuf2.__name__ = \"sin(x-.5)\"\nuf3 = lambda x: sin(25*x-1)\nuf3.__name__ = \"sin(25*x-1)\"\n\nufunc_test_params = [\n (np.absolute, [([uf1, (-3,-.5)], eps), ]),\n (np.arccos, [([uf1, (-.8,.8)], eps), ]),\n (np.arccosh, [([uf1, (2,3) ], eps), ]),\n (np.arcsin, [([uf1, (-.8,.8)], eps), ]),\n (np.arcsinh, [([uf1, (2,3) ], eps), ]),\n (np.arctan, [([uf1, (-.8,.8)], eps), ]),\n (np.arctanh, [([uf1, (-.8,.8)], eps), ]),\n (np.cos, [([uf1, (-3,3) ], eps), ]),\n (np.cosh, [([uf1, (-3,3) ], eps), ]),\n (np.exp, [([uf1, (-3,3) ], eps), ]),\n (np.exp2, [([uf1, (-3,3) ], eps), ]),\n (np.expm1, [([uf1, (-3,3) ], eps), ]),\n (np.log, [([uf1, (2,3) ], eps), ]),\n (np.log2, [([uf1, (2,3) ], eps), ]),\n (np.log10, [([uf1, (2,3) ], eps), ]),\n (np.log1p, [([uf1, (-.8,.8)], eps), ]),\n (np.sinh, [([uf1, (-3,3) ], eps), ]),\n (np.sin, [([uf1, (-3,3) ], eps), ]),\n (np.tan, [([uf1, (-.8,.8)], eps), ]),\n (np.tanh, [([uf1, (-3,3) ], eps), ]),\n (np.sqrt, [([uf1, (2,3) ], eps), ]),\n\n (np.cos, [([uf2, (-3,3) ], eps), ]),\n (np.cosh, [([uf2, (-3,3) ], eps), ]),\n (np.exp, [([uf2, (-3,3) ], eps), ]),\n (np.expm1, [([uf2, (-3,3) ], eps), ]),\n (np.sinh, [([uf2, (-3,3) ], eps), ]),\n (np.sin, [([uf2, (-3,3) ], eps), ]),\n (np.tan, [([uf2, (-.8,.8)], eps), ]),\n (np.tanh, [([uf2, (-3,3) ], eps), ]),\n\n (np.cos, [([uf3, (-3,3) ], eps), ]),\n (np.cosh, [([uf3, (-3,3) ], eps), ]),\n (np.exp, [([uf3, (-3,3) ], eps), ]),\n (np.expm1, [([uf3, (-3,3) ], eps), ]),\n (np.sinh, [([uf3, (-3,3) ], eps), ]),\n (np.sin, [([uf3, (-3,3) ], eps), ]),\n (np.tan, [([uf3, (-.8,.8)], eps), ]),\n (np.tanh, [([uf3, (-3,3) ], eps), ]),\n]\n\ndef ufuncTester(ufunc, f, interval, tol):\n ff = Bndfun.initfun_adaptive(f, interval)\n gg = lambda x: ufunc(f(x))\n GG = getattr(ff, ufunc.__name__)()\n def tester(self):\n xx = interval(self.yy)\n vscl = GG.vscale\n lscl = GG.size\n self.assertLessEqual(infnorm(gg(xx)-GG(xx)), vscl*lscl*tol)\n return 
tester\n\nfor (ufunc, [([f, intvl], tol), ]) in ufunc_test_params:\n interval = Interval(*intvl)\n _testfun_ = ufuncTester(ufunc, f, interval, tol)\n _testfun_.__name__ = \\\n \"test_{}_{}_[{:.1f},{:.1f}]\".format(\n ufunc.__name__, f.__name__, *intvl)\n setattr(Ufuncs, _testfun_.__name__, _testfun_)\n\n\nclass Roots(unittest.TestCase):\n\n def test_empty(self):\n ff = Bndfun.initempty()\n self.assertEquals(ff.roots().size, 0)\n\n def test_const(self):\n ff = Bndfun.initconst(0., Interval(-2,3))\n gg = Bndfun.initconst(2., Interval(-2,3))\n self.assertEquals(ff.roots().size, 0)\n self.assertEquals(gg.roots().size, 0)\n\n# add tests for roots\ndef rootsTester(f, interval, roots, tol):\n subinterval = Interval(*interval)\n ff = Bndfun.initfun_adaptive(f, subinterval)\n rts = ff.roots()\n def tester(self):\n self.assertLessEqual(infnorm(rts-roots), tol)\n return tester\n\nrootstestfuns = (\n (lambda x: 3*x+2., [-2,3], np.array([-2/3]), eps),\n (lambda x: x**2+.2*x-.08, [-2,5], np.array([-.4, .2]), 3e1*eps),\n (lambda x: sin(x), [-7,7], pi*np.linspace(-2,2,5), 1e1*eps),\n (lambda x: cos(2*pi*x), [-20,10], np.linspace(-19.75, 9.75, 60), 3e1*eps),\n (lambda x: sin(100*pi*x), [-0.5,0.5], np.linspace(-.5,.5,101), eps),\n (lambda x: sin(5*pi/2*x), [-1,1], np.array([-.8, -.4, 0, .4, .8]), eps)\n )\nfor k, args in enumerate(rootstestfuns):\n _testfun_ = rootsTester(*args)\n _testfun_.__name__ = \"test_roots_{}\".format(k)\n setattr(Roots, _testfun_.__name__, _testfun_)\n\n# reset the testsfun variable so it doesn't get picked up by nose\n_testfun_ = None\n",
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nclass DefaultPrefs():\n eps = np.finfo(float).eps\n tech = \"Chebtech2\"\n domain = np.array([-1., 1.])"
] | [
[
"matplotlib.pyplot.subplots",
"numpy.array",
"numpy.random.rand",
"numpy.linspace"
],
[
"numpy.array",
"numpy.finfo"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
solderneer/opencv-adventures | [
"20abea930f44296367217145fab73866ea654084"
] | [
"blur.py"
] | [
"#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\nimage = cv2.imread('../images/input.jpg')\n\nblur = cv2.blur(image, (3,3))\ngaussian_blur = cv2.GaussianBlur(image, (3,3), 0)\nmedian = cv2.medianBlur(image, 5)\n\ncv2.imshow(\"boxblux\", blur)\ncv2.waitKey()\ncv2.imshow(\"gaussian\", gaussian_blur)\ncv2.waitKey()\ncv2.imshow(\"median\", median)\ncv2.waitKey()\n\n# should go look into image de-noising\n# brighten needs an array [-1,-1,-1],[-1,9,-1],[-1,-1,-1]\n# kernel/convolution matrix can even be used for edge detection\nmatrix = np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])\nsharp = cv2.filter2D(image, -1, matrix)\n\ncv2.imshow(\"sharp\", sharp)\ncv2.waitKey()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liuzuxin/RL-Safety-Algorithms | [
"2575225b1ea8ce12e1e13f7a81f8dda7b4189708"
] | [
"tests/test_mean_std.py"
] | [
"import unittest\nimport numpy as np\nimport torch\nfrom rl_safety_algorithms.common.online_mean_std import OnlineMeanStd\nimport rl_safety_algorithms.common.mpi_tools as mpi_tools\n\n\nclass TestOnlineMeanStd(unittest.TestCase):\n \"\"\" Testing the non-MPI version.\n \"\"\"\n\n @staticmethod\n def perform_single_pass(rms, input_shape) -> bool:\n x = torch.from_numpy(np.random.normal(size=input_shape))\n rms(x) # perform one call\n return True\n\n @staticmethod\n def get_data(M, N, epoch):\n \"\"\"Returns data matrix of shape MxN.\"\"\"\n np.random.seed(epoch)\n # start = 10000 + 4 * epoch\n # stop = pid*10000 + M * N + 4 * epoch\n data = np.random.normal(size=(M, N))\n return data\n \n def test_vector_updates(self):\n \"\"\" OnlineMeanStd module is updated with a batch of vector inputs,\n i.e. inputs of shape M x N.\n Note that std dev might differ more than 1e-5 when epochs > 10.\n \"\"\"\n epochs = 20\n T = 500\n obs_shape = (1, )\n\n # === calculation through online updates\n rms = OnlineMeanStd(shape=obs_shape)\n for ep in range(epochs):\n # shape of batch: T x obs_shape\n vector_input = self.get_data(T, obs_shape[0], ep).flatten()\n rms.update(vector_input)\n rms_mean = rms.mean.numpy()\n rms_std = rms.std.numpy()\n\n # ===== calculate ground truths\n obs_list = [self.get_data(T, obs_shape[0], ep) for ep in range(epochs)]\n obs = np.vstack(obs_list)\n gt_mean = np.mean(obs, axis=0)\n gt_std = np.std(obs, axis=0)\n\n self.assertTrue(np.allclose(rms_mean, gt_mean))\n self.assertTrue(np.allclose(rms_std, gt_std, rtol=1e-2))\n self.assertTrue(self.perform_single_pass(rms, obs_shape))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.allclose",
"numpy.random.seed",
"numpy.std",
"numpy.random.normal",
"numpy.mean",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abdelsamea/DeTraC | [
"ab03719b49a1a048f74f08600a6670f6757bbe60"
] | [
"src/frameworks/detrac_torch/feature_composer.py"
] | [
"import tensorflow as tf\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\nfrom tools.preprocessing import preprocess_images, preprocess_single_image\nfrom tools.kfold import KFold_cross_validation_split\nfrom tools.extraction_and_metrics import extract_features, compute_confusion_matrix\n\nfrom .network import Net\n\nimport torchvision.models as models\nimport torch\n\nimport os\nimport cv2\n\n# Feature composer training\ndef train_feature_composer(\n composed_dataset_path: str,\n epochs: int,\n batch_size: int,\n num_classes: int,\n folds: int,\n lr:float,\n cuda: bool,\n ckpt_dir: str\n):\n \"\"\"\n Feature extractor training.\n\n params:\n <string> composed_dataset_path\n <int> epochs\n <int> batch_size\n <int> num_classes\n <int> folds: Number of folds for KFold cross validation \n <float> lr: Learning rate\n <bool> cuda: Whether to use GPU or not\n <string> ckpt_dir: Model's location\n \"\"\"\n\n # Preprocess images, returning the classes, features and labels\n class_names, x, y = preprocess_images(\n dataset_path=composed_dataset_path, \n width=224, \n height=224, \n num_classes=num_classes, \n framework=\"torch\", \n imagenet=True\n )\n\n # Split data\n X_train, X_test, Y_train, Y_test = KFold_cross_validation_split(\n features=x, \n labels=y, \n n_splits=folds\n )\n\n # Normalize\n X_train /= 255\n X_test /= 255\n\n # Instantiate model\n net = Net(\n models.vgg16(pretrained=True),\n num_classes=num_classes,\n lr=lr,\n cuda=cuda,\n mode=\"feature_composer\",\n ckpt_dir=ckpt_dir,\n labels=class_names\n )\n\n # Train model\n net.fit(\n X_train,\n Y_train,\n X_test,\n Y_test,\n epochs,\n batch_size,\n resume=False\n )\n\n # Confusion matrix\n compute_confusion_matrix(\n y_true=Y_test, \n y_pred=net.infer(X_test), \n framework=\"torch\", \n mode=\"feature_composer\", \n num_classes = num_classes // 2\n )\n\n# Inference\ndef infer(\n ckpt_dir: str, \n ckpt_name: str, \n input_image: str\n) -> dict:\n \"\"\"\n Main inference method.\n\n 
params:\n <string> ckpt_dir: Saved model's directory\n <string> ckpt_name: Saved model's name\n <string> input_image: Image path\n\n returns:\n <dict> Dictionary containing the predictions with their levels of confidence.\n E.g.: {\n COVID19_1:0.10\n COVID19_2:0.15\n ...\n }\n \"\"\"\n ckpt_path = os.path.join(ckpt_dir, ckpt_name)\n num_classes = torch.load(ckpt_path, map_location=lambda storage, loc: storage)[\"num_classes\"]\n \n # Instantiate model\n net = Net(\n models.vgg16(pretrained=True),\n num_classes=num_classes,\n mode=\"feature_composer\",\n ckpt_dir=ckpt_dir\n )\n \n # Load model\n net.load_model_for_inference(os.path.join(ckpt_dir, ckpt_name))\n \n # Check if inputed file is an image.\n assert input_image.lower().endswith(\"png\") or input_image.lower().endswith(\"jpg\") or input_image.lower().endswith(\"jpeg\")\n\n # Preprocess\n img = preprocess_single_image(\n img=input_image, \n width=224, \n height=224, \n imagenet=True, \n framework=\"torch\"\n )\n\n # Return prediction\n return net.infer(img, ckpt_path = os.path.join(ckpt_dir, ckpt_name), use_labels=True)\n"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rangsimanketkaew/learning-to-smell | [
"17021a82f7fcdda00536a906dd8dc64cb5663261"
] | [
"metric.py"
] | [
"import tensorflow as tf\n# from tensorflow.python.framework.ops import disable_eager_execution\n# disable_eager_execution()\nfrom tensorflow.keras import backend as K\n\n\ndef jaccard_tensorflow(y_true, y_pred):\n \"\"\"Jaccard score of Tensor in tensorflow for graph mode.\n \"\"\"\n intersection = tf.sets.intersection(y_true[None:], y_pred[None:])\n intersection = tf.sparse.to_dense(intersection)[0]\n union = tf.sets.union(y_true[None:], y_pred[None:])\n union = tf.sparse.to_dense(union)[0]\n return float(len(intersection) / len(union))\n\n\ndef jaccard_tensorflow_eager(y_true, y_pred):\n \"\"\"Jaccard score with built-in function in tensorflow in eager mode.\n \"\"\"\n set1 = set(y_true.numpy())\n set2 = set(y_pred.numpy())\n return float((len(set1.intersection(set2))) / (len(set1.union(set2))))\n\n\ndef jaccard_from_keras_cont(y_true, y_pred):\n \"\"\"Jaccard score for keras.\n Taken directly from https://github.com/keras-team/keras-contrib/blob/master/keras_contrib/losses/jaccard.py\n \"\"\"\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection) / (sum_ - intersection)\n return (1 - jac)\n"
] | [
[
"tensorflow.sets.union",
"tensorflow.sets.intersection",
"tensorflow.keras.backend.abs",
"tensorflow.sparse.to_dense"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
faver2014/InertialNav_Learn | [
"58a0b6db95918e037ed6d08e5d2c8ba2ce388554"
] | [
"code/plot_wind.py"
] | [
"#!/bin/python\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\nimport numpy as np\nimport math\n\n\t# State vector:\n\t# 0-3: quaternions (q0, q1, q2, q3)\n\t# 4-6: Velocity - m/sec (North, East, Down)\n\t# 7-9: Position - m (North, East, Down)\n\t# 10-12: Delta Angle bias - rad (X,Y,Z)\n\t# 13: Accel offset\n\t# 14-15: Wind Vector - m/sec (North,East)\n\t# 16-18: Earth Magnetic Field Vector - milligauss (North, East, Down)\n\t# 19-21: Body Magnetic Field Vector - milligauss (X,Y,Z)\n\t# 22: Terrain\ntry:\n\tdata = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,\n\t\tskip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',\n\t\t'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd', 'dist'])\nexcept ValueError:\n\ttry:\n\t\tdata = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,\n\t\tskip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',\n\t\t'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])\n\texcept ValueError:\n\t\tdata = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,\n\t\t\tskip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',\n\t\t\t'Bx', 'By', 'Bz', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])\n\nfig = plt.figure()\n\nax1 = fig.add_subplot(211)\n\nax1.set_title(\"Wind Velocity\") \nax1.set_xlabel('time (s)')\nax1.set_ylabel('Wind North')\nax1.plot(data['time'], data['Wn'], color='r', label='Wind N')\n\nax2 = fig.add_subplot(212)\n \nax2.set_xlabel('time (s)')\nax2.set_ylabel('Wind East')\nax2.plot(data['time'], data['We'], color='g', label='Wind E')\n\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"numpy.genfromtxt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zimo-geek/mindspore | [
"665ec683d4af85c71b2a1f0d6829356f2bc0e1ff"
] | [
"mindspore/python/mindspore/train/callback/_loss_monitor.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"LossMonitor Callback class.\"\"\"\n\nimport numpy as np\nfrom mindspore.common.tensor import Tensor\n\nfrom ._callback import Callback\n\n\nclass LossMonitor(Callback):\n \"\"\"\n Monitor the loss in training.\n\n If the loss is NAN or INF, it will terminate training.\n\n Note:\n If per_print_times is 0, do not print loss.\n\n Args:\n per_print_times (int): How many steps to print once loss. During sink mode, it will print loss in the\n nearest step. 
Default: 1.\n\n Raises:\n ValueError: If per_print_times is not an integer or less than zero.\n \"\"\"\n\n def __init__(self, per_print_times=1):\n super(LossMonitor, self).__init__()\n if not isinstance(per_print_times, int) or per_print_times < 0:\n raise ValueError(\"The argument 'per_print_times' must be int and >= 0, \"\n \"but got {}\".format(per_print_times))\n self._per_print_times = per_print_times\n self._last_print_time = 0\n\n def step_end(self, run_context):\n \"\"\"\n Print training loss at the end of step.\n\n Args:\n run_context (RunContext): Context of the train running.\n \"\"\"\n cb_params = run_context.original_args()\n loss = cb_params.net_outputs\n\n if isinstance(loss, (tuple, list)):\n if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):\n loss = loss[0]\n\n if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):\n loss = float(np.mean(loss.asnumpy()))\n\n cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1\n\n if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):\n raise ValueError(\"epoch: {} step: {}. Invalid loss, terminating training.\".format(\n cb_params.cur_epoch_num, cur_step_in_epoch))\n if self._per_print_times != 0 and (cb_params.cur_step_num - self._last_print_time) >= self._per_print_times:\n self._last_print_time = cb_params.cur_step_num\n print(\"epoch: %s step: %s, loss is %s\" % (cb_params.cur_epoch_num, cur_step_in_epoch, loss), flush=True)\n"
] | [
[
"numpy.isnan",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
How-Wang/onnx | [
"c940fa3fea84948e46603cab2f86467291443beb",
"c940fa3fea84948e46603cab2f86467291443beb",
"c940fa3fea84948e46603cab2f86467291443beb",
"c940fa3fea84948e46603cab2f86467291443beb"
] | [
"onnx/backend/test/case/node/reducemin.py",
"onnx/backend/test/case/node/cumsum.py",
"onnx/backend/test/case/node/acos.py",
"onnx/backend/test/case/node/squeeze.py"
] | [
"# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass ReduceMin(Base):\n\n @staticmethod\n def export_do_not_keepdims() -> None:\n shape = [3, 2, 2]\n axes = [1]\n keepdims = 0\n\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n #print(reduced)\n #[[5., 1.]\n # [30., 1.]\n # [55., 1.]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_random')\n\n @staticmethod\n def export_keepdims() -> None:\n shape = [3, 2, 2]\n axes = [1]\n keepdims = 1\n\n node = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n #print(reduced)\n #[[[5., 1.]]\n # [[30., 1.]]\n # [[55., 1.]]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_random')\n\n @staticmethod\n def 
export_default_axes_keepdims() -> None:\n shape = [3, 2, 2]\n axes = None\n keepdims = 1\n\n node = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n #print(reduced)\n #[[[1.]]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_random')\n\n @staticmethod\n def export_negative_axes_keepdims() -> None:\n shape = [3, 2, 2]\n axes = [-2]\n keepdims = 1\n\n node = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\n data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n # print(reduced)\n #[[[5., 1.]]\n # [[30., 1.]]\n # [[55., 1.]]]\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_example')\n\n np.random.seed(0)\n data = np.random.uniform(-10, 10, shape).astype(np.float32)\n reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\n expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_random')\n",
"# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass CumSum(Base):\n\n @staticmethod\n def export_cumsum_1d() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y']\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([1., 3., 6., 10., 15.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d')\n\n @staticmethod\n def export_cumsum_1d_exclusive() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n exclusive=1\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([0., 1., 3., 6., 10.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_exclusive')\n\n @staticmethod\n def export_cumsum_1d_reverse() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n reverse=1\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([15., 14., 12., 9., 5.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_reverse')\n\n @staticmethod\n def export_cumsum_1d_reverse_exclusive() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n reverse=1,\n exclusive=1\n )\n x = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\n axis = np.int32(0)\n y = np.array([14., 12., 9., 5., 0.]).astype(np.float64)\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_reverse_exclusive')\n\n @staticmethod\n def export_cumsum_2d_axis_0() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n )\n x = np.array([1., 
2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\n axis = np.int32(0)\n y = np.array([1., 2., 3., 5., 7., 9.]).astype(np.float64).reshape((2, 3))\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_axis_0')\n\n @staticmethod\n def export_cumsum_2d_axis_1() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n )\n x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\n axis = np.int32(1)\n y = np.array([1., 3., 6., 4., 9., 15.]).astype(np.float64).reshape((2, 3))\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_axis_1')\n\n @staticmethod\n def export_cumsum_2d_negative_axis() -> None:\n node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n )\n x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\n axis = np.int32(-1)\n y = np.array([1., 3., 6., 4., 9., 15.]).astype(np.float64).reshape((2, 3))\n expect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_negative_axis')\n",
"# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Acos(Base):\n\n @staticmethod\n def export() -> None:\n node = onnx.helper.make_node(\n 'Acos',\n inputs=['x'],\n outputs=['y'],\n )\n\n x = np.array([-0.5, 0, 0.5]).astype(np.float32)\n y = np.arccos(x)\n expect(node, inputs=[x], outputs=[y],\n name='test_acos_example')\n\n x = np.random.rand(3, 4, 5).astype(np.float32)\n y = np.arccos(x)\n expect(node, inputs=[x], outputs=[y],\n name='test_acos')\n",
"# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom onnx.backend.test.case.base import Base\nfrom onnx.backend.test.case.node import expect\n\n\nclass Squeeze(Base):\n\n @staticmethod\n def export_squeeze() -> None:\n node = onnx.helper.make_node(\n 'Squeeze',\n inputs=['x', 'axes'],\n outputs=['y'],\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.float32)\n axes = np.array([0], dtype=np.int64)\n y = np.squeeze(x, axis=0)\n\n expect(node, inputs=[x, axes], outputs=[y],\n name='test_squeeze')\n\n @staticmethod\n def export_squeeze_negative_axes() -> None:\n node = onnx.helper.make_node(\n 'Squeeze',\n inputs=['x', 'axes'],\n outputs=['y'],\n )\n x = np.random.randn(1, 3, 1, 5).astype(np.float32)\n axes = np.array([-2], dtype=np.int64)\n y = np.squeeze(x, axis=-2)\n expect(node, inputs=[x, axes], outputs=[y],\n name='test_squeeze_negative_axes')\n"
] | [
[
"numpy.random.uniform",
"numpy.minimum.reduce",
"numpy.array",
"numpy.random.seed"
],
[
"numpy.array",
"numpy.int32"
],
[
"numpy.random.rand",
"numpy.array",
"numpy.arccos"
],
[
"numpy.squeeze",
"numpy.array",
"numpy.random.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kritika-srivastava/The-Conurbation-Algorithm | [
"d5d39d701b1e09c975dceca5445c4398fd5fd93b",
"d5d39d701b1e09c975dceca5445c4398fd5fd93b",
"d5d39d701b1e09c975dceca5445c4398fd5fd93b"
] | [
"src/procedural_city_generation/polygons/getBlock.py",
"src/procedural_city_generation/additional_stuff/readimages.py",
"src/procedural_city_generation/building_generation/cuts.py"
] | [
"from __future__ import division\n\nimport numpy as np\n\nfrom procedural_city_generation.additional_stuff.Singleton import Singleton\nfrom procedural_city_generation.polygons.Polygon2D import Polygon2D\n\nsingleton = Singleton(\"polygons\")\n\n\ndef p_in_poly(poly, point):\n x, y = point\n n = len(poly)\n inside = False\n\n p1x, p1y = poly[0][0]\n for i in range(n+1):\n p2x, p2y = poly[i % n][0]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n\n return inside\n\n\ndef getBlock(wedges, vertex_list):\n '''Calculate block to be divided into lots, as well as street polygons'''\n\n old_vertices = [vertex_list[wedge.b] for wedge in wedges]\n old_poly = Polygon2D([v.coords for v in old_vertices])\n\n new_vertices = []\n polylist = []\n last2 = []\n\n for i in range(len(old_vertices)):\n\n # Calculate position of new vertex\n alpha = wedges[i-1].alpha\n a, b, c = old_vertices[i-2], old_vertices[i-1], old_vertices[i]\n v1 = a.coords - b.coords\n v2 = c.coords - b.coords\n n1 = np.array((-v1[1], v1[0]))/np.linalg.norm(v1)\n n2 = np.array((v2[1], -v2[0]))/np.linalg.norm(v2)\n\n # Change lengths of normal vectors depending on whether each\n # edge is a minor road or a main road\n if b.minor_road or a.minor_road:\n n1 *= singleton.minor_factor\n else:\n n1 *= singleton.main_factor\n if b.minor_road or c.minor_road:\n n2 *= singleton.minor_factor\n else:\n n2 *= singleton.main_factor\n\n # Check if current vertex is dead end\n if not 0 - 0.001 < alpha < 0 + 0.001:\n # Not a dead end: move edges which share this vertex\n # inwards along their normal vectors, find intersection\n try:\n intersection = np.linalg.solve(\n np.array(((v1), (v2))).T, (b.coords+n2)-(b.coords+n1))\n except np.linalg.LinAlgError:\n raise Exception(str(v1)+\", \"+str(v2),\n \"angle: \"+str(wedges[i-1].alpha))\n new = b.coords + n1 + 
intersection[0]*v1\n # Check if new vertex is in old polygon\n if p_in_poly(old_poly.edges, new):\n # Append new vertex to lot polygon\n new_vertices.append(new)\n these2 = [b.coords, new]\n if last2:\n street_vertices = last2 + these2\n polylist.append(\n Polygon2D(street_vertices, poly_type=\"road\"))\n last2 = these2[::-1]\n else:\n # New vertex not in polygon, return old polygon as street polygon\n return [old_poly]\n else:\n # Dead end: determine two new vertices by adding the two normals\n # to current vector, then check if these are in old polygon\n new1, new2 = b.coords + n1, b.coords + n2\n if p_in_poly(old_poly.edges, new1) and p_in_poly(old_poly.edges, new2):\n new_vertices += [new1, new2]\n if last2:\n street_vertices = last2 + [b.coords, new1]\n polylist.append(\n Polygon2D(street_vertices, poly_type=\"road\"))\n street_vertices = [b.coords, new2, new1]\n polylist.append(\n Polygon2D(street_vertices, poly_type=\"road\"))\n last2 = [new2, b.coords]\n\n else:\n old_poly.poly_type = \"road\"\n return [old_poly]\n street_vertices = last2 + [old_vertices[-1].coords, new_vertices[0]]\n polylist.append(Polygon2D(street_vertices, poly_type=\"road\"))\n\n # All new vertices are in old polygon: append block polygon\n block_poly = Polygon2D(new_vertices)\n if block_poly.area < singleton.max_area:\n block_poly.poly_type = \"lot\"\n polylist.append(block_poly)\n return polylist\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n import construct_polygons as cp\n polys, vertices = cp.main()\n for p in getBlock(polys[1], vertices):\n p.selfplot()\n plt.show()\n",
"def main():\n '''Deprecated. Was used at lange nacht der Wissenschaften to read images and highlight the way they impact the creation of the Strassennetz'''\n import matplotlib.pyplot as plt\n import matplotlib.image as mpimg\n import numpy as np\n\n\n\n img=mpimg.imread('./rawface.jpg')\n img2=np.ndarray(img.shape)\n\n for x in xrange(img.shape[0]):\n for y in xrange(img.shape[1]):#\n val=img[x][y]\n\n biggestval=np.argmax(val)\n arr=np.zeros(3)\n arr[biggestval]=1\n img[x][y]=val*arr\n val=val[biggestval]\n val=int(((val-75)*6)+75)\n val=max((250-val), 1)\n val=min(val, 254)\n img2[x][y]=np.array([val, val, val])\n\n plt.imsave('./Strassennetz/PythonVersion/Regelbilder/face.jpg', img)\n plt.imsave('./Strassennetz/PythonVersion/Bevoelkerungsdichtebilder/face.jpg', img2)\n\n fig=plt.figure()\n ax=plt.subplot(211)\n ax.imshow(img2)\n ax2=plt.subplot(212)\n ax2.imshow(img)\n plt.show()\n\n\n return 0\n\nif __name__ == '__main__':\n main()\n\n",
"\"\"\"\nCreated on 2015.08.17\n@author: Jonathan Sauder - [email protected]\n\"\"\"\n\nfrom __future__ import division\n\nimport random\nfrom copy import copy\n\nimport numpy as np\n\nimport procedural_city_generation\nfrom procedural_city_generation.building_generation.building_tools import *\nfrom procedural_city_generation.building_generation.Polygon3D import Polygon3D\n\n\ndef normal(arr):\n return np.array([-arr[1], arr[0], 0])\n\n\ndef randomcut(walls, housebool):\n \"\"\"\n Chooses a Cut for the creation of the floorplan from all available cuts.\n Every cut functions by adding/replacing values in the numpy array of\n the walls' vertices. There are two main cuts.\n All other cuts are a combination of these two cuts.\n The elementary cuts are::\n Ccut\n ------------ ----+ +----\n == > | |\n +---+\n Lcut\n --------+ ----+\n | == > |\n | +---+\n | |\n\n Parameters\n ----------\n - walls : procedural_city_generation.building_generation.walls object\n - housebool : boolean\n Value showing if a building is a house or not\n\n Returns\n ----------\n - procedural_city_generation.building_generation.walls object\n \"\"\"\n\n # TODO: Get numeric values in some sort of conf file\n\n n = random.randint(0, 100)\n a1 = random.uniform(0, 0.4)\n a2 = random.uniform(0, 0.4)\n s = random.randint(0, walls.l-1)\n\n if walls.l == 4:\n # Most \"advanced\" cuts rely on the assumption that the walls object\n # used to have 4 sides\n if (not housebool) or (random.uniform(0, 1) < 0.5):\n if n > 20:\n if n < 35:\n return Ccut(walls, a1, a2, s)\n elif n < 47:\n return Hcut(walls, a1, a2, s)\n elif n < 56:\n return Xcut(walls, a1, a2, s)\n elif n < 68:\n return Lcut(walls, a1, a2, s)\n elif n < 77:\n return Tcut(walls, a1, a2, s)\n elif n < 87:\n return Ycut(walls, a1, a2, s)\n elif n < 95:\n return Hcut2(walls, a1, a2, s)\n else:\n return Ccut2(walls, a1, a2, s)\n return walls\n # Those that do not, will create random combinations through Lcut and Ccut\n else:\n k = 0\n for i in 
range(walls.l):\n if random.randint(0, 100) > 50:\n if n > 40:\n walls = Ccut(walls, a1, a2, i+k)\n k += 4\n elif n > 45:\n walls = Lcut(walls, a1, a2, i+k)\n k += 2\n return walls\n\n\n# def Zcut(walls, dist1, dist2, side):\n# \"\"\"\n# Cuts a four-sided walls object as follows:\n# ::\n# +-----------+ +-------+\n# | | | |\n# | | == > +---+ |\n# | | | +---+\n# | | | |\n# +-----------+ +-------+\n\n# Parameters\n# ----------\n# walls : procedural_city_generation.building_generation.walls object\n# dist1 : float\n# Determines the length of one of the two vectors of the cut\n# dist2 : float\n# Determines the length of one of the two vectors of the cut\n# side : int\n# The pair of sides of the building which will be cut\n\n# Returns\n# -------\n# procedural_city_generation.building_generation.walls object\n# \"\"\"\n# side = (side % 2)+2\n# v2 = (walls.vertices[side]-walls.vertices[side-1])*dist2\n# v1 = (walls.vertices[side-2]-walls.vertices[side-1])*dist1\n# walls = Lcut(walls, dist1, dist2, side, v1, v2)\n# walls = Lcut(walls, dist1, dist2, side-2, -v1, -v2)\n# return walls\n\n\ndef Lcut(walls, dist1, dist2, side, v1=None, v2=None):\n \"\"\"\n Cuts a four-sided walls object as follows:\n ::\n +-----------+ +-----------+\n | | | |\n | | == > | |\n | | | +---+\n | | | |\n +-----------+ +-------+\n\n Parameters\n ----------\n walls : procedural_city_generation.building_generation.walls object\n dist1 : float\n Determines the length of one of the two vectors of the cut\n dist2 : float\n Determines the length of one of the two vectors of the cut\n side : int\n The pair of sides of the building which will be cut\n\n Returns\n -------\n procedural_city_generation.building_generation.walls object\n \"\"\"\n verts = walls.vertices\n v1 = v1 if (v1 is not None) else (verts[side-2]-verts[side-1])*dist1\n v2 = v2 if (v2 is not None) else (verts[side]-verts[side-1])*dist2\n verts = np.insert(verts, side, np.array(\n [verts[side-1]+v1+v2, verts[side-1]+v2]), axis=0)\n 
verts[side-1] += v1\n return Walls(verts, walls.l+2)\n\n\ndef Ccut(walls, dist1, dist2, side):\n \"\"\"\n Cuts a four-sided walls-object as follows:\n ::\n +-----------+ +---+ +---+\n | | | | | |\n | | == > | +---+ |\n | | | |\n | | | |\n +-----------+ +-----------+\n\n Parameters\n ----------\n walls : procedural_city_generation.building_generation.walls object\n dist1 : float\n Determines the length of one of the two vectors of the cut\n dist2 : float\n Determines the length of one of the two vectors of the cut\n side : int\n The pair of sides of the building which will be cut\n\n Returns\n -------\n procedural_city_generation.building_generation.walls object\n \"\"\"\n if dist2 < dist1:\n dist1, dist2 = dist2, dist1\n a = walls.vertices[side]\n v = walls.vertices[side-1]-a\n n = normal(v)\n a1 = a+dist2*v\n b1 = a+(1-dist2)*v\n a2 = a1+dist1*n\n b2 = b1+dist1*n\n\n return Walls(np.insert(walls.vertices, side, np.array([b1, b2, a2, a1]), axis=0), walls.l+4)\n\n\ndef Tcut(walls, dist1, dist2, side):\n \"\"\"\n Cuts a four-sided walls object as follows:\n ::\n +-----------+ +---+\n | | | |\n | | == > +---+ +---+\n | | | |\n | | | |\n +-----------+ +-----------+\n\n Parameters\n ----------\n walls : procedural_city_generation.building_generation.walls object\n dist1 : float\n Determines the length of one of the two vectors of the cut\n dist2 :\n float Determines the length of one of the two vectors of the cut\n side : int\n The pair of sides of the building which will be cut\n\n Returns\n -------\n procedural_city_generation.building_generation.walls object\n \"\"\"\n side = (side % 2)+2\n v2 = (walls.vertices[side]-walls.vertices[side-1])\n v1 = (walls.vertices[side-2]-walls.vertices[side-1])\n walls = Lcut(walls, dist1, dist2, side, v1*dist1, v2*dist2)\n walls = Lcut(walls, dist1, dist2, side-1, v2*dist2, -v1*dist1)\n return walls\n\n\ndef Ycut(walls, dist1, dist2, side):\n \"\"\"\n Cuts a four-sided walls object as follows:\n ::\n +-----------+ +---+\n | | | |\n 
| | == > +---+ +---+\n | | | |\n | | | +---+ |\n +-----------+ +---+ +---+\n\n Parameters\n ----------\n walls : procedural_city_generation.building_generation.walls object\n dist1 : float\n Determines the length of one of the two vectors of the cut\n dist2 : float\n Determines the length of one of the two vectors of the cut\n side : int\n The pair of sides of the building which will be cut\n\n Returns\n -------\n procedural_city_generation.building_generation.walls object\n \"\"\"\n walls = Tcut(walls, dist1/2, dist2/2, side)\n walls = Ccut(walls, dist1/2, dist2/2, side-3)\n return walls\n\n\ndef Hcut(walls, dist1, dist2, side):\n \"\"\"\n Cuts a four-sided walls object as follows:\n ::\n +-----------+ +---+ +---+\n | | | | | |\n | | == > | +---+ |\n | | | +---+ |\n | | | | | |\n +-----------+ +---+ +---+\n\n Parameters\n ----------\n walls : procedural_city_generation.building_generation.walls object\n dist1 : float\n Determines the length of one of the two vectors of the cut\n dist2 : float\n Determines the length of one of the two vectors of the cut\n side : int\n The pair of sides of the building which will be cut\n\n Returns\n -------\n procedural_city_generation.building_generation.walls object\n \"\"\"\n\n walls = Ccut(walls, dist1, dist2/2, side)\n walls = Ccut(walls, dist1, dist2/2, side-2)\n return walls\n\n\ndef Ccut2(walls, dist1, dist2, side):\n \"\"\"\n A (n=2) recursive Ccut\n \"\"\"\n walls = Ccut(walls, dist1, dist2/2, side)\n walls = Ccut(walls, dist1, dist2/2, side+4)\n walls = Ccut(walls, dist1, dist2/2, side)\n return walls\n\n\ndef Hcut2(walls, dist1, dist2, side):\n \"\"\"\n A (n=2) recursive Hcut\n \"\"\"\n\n side = side % 2\n walls = Ccut(walls, dist1, dist2/2, side)\n walls = Ccut(walls, dist1, dist2/2, side+4)\n walls = Ccut(walls, dist1, dist2/2, side)\n walls = Ccut(walls, dist1, dist2/2, side+14)\n walls = Ccut(walls, dist1, dist2/2, side+18)\n walls = Ccut(walls, dist1, dist2/2, side+14)\n return walls\n\n\ndef Xcut(walls, dist1, 
dist2, side):\n \"\"\"\n Cuts a four-sided walls object as follows:\n ::\n +-----------+ +---+ +---+\n | | | +---+ |\n | | == > +-+ +-+\n | | +-+ +-+\n | | | +---+ |\n +-----------+ +---+ +---+\n\n Parameters\n ----------\n walls : procedural_city_generation.building_generation.walls object\n dist1 : float\n Determines the length of one of the two vectors of the cut\n dist2 : float\n Determines the length of one of the two vectors of the cut\n side : int\n The pair of sides of the building which will be cut\n\n Returns\n -------\n procedural_city_generation.building_generation.walls object\n \"\"\"\n walls = Ccut(walls, dist1, dist2/4, 0)\n walls = Ccut(walls, dist1, dist2/4, 5)\n walls = Ccut(walls, dist1, dist2/4, 10)\n walls = Ccut(walls, dist1, dist2/4, 15)\n return walls\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.show",
"numpy.linalg.norm"
],
[
"matplotlib.pyplot.imsave",
"numpy.ndarray",
"matplotlib.image.imread",
"matplotlib.pyplot.subplot",
"numpy.argmax",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ryan-dd/autonomous-systems | [
"39fa1394e6b9577600e52d9b7ecd9184a1c90ce1"
] | [
"extended_kalman_filter/extended_kalman_filter.py"
] | [
"from math import cos, sin, atan2, exp\n\nimport numpy as np\n\nfrom heading_range_robot.parameters import *\n\n\nclass EKF:\n def __init__(self, sample_period):\n self._change_t = sample_period\n self.mean_belief = np.vstack((INITIAL_X, INITIAL_Y, INITIAL_THETA))\n self.covariance_belief = np.eye(3)\n self.Qt = np.eye(2)*np.vstack((STD_DEV_LOCATION_RANGE**2, STD_DEV_LOCATION_BEARING**2))\n self.all_features = np.vstack((LANDMARK_1_LOCATION, LANDMARK_2_LOCATION, LANDMARK_3_LOCATION))\n\n def prediction_step(self, theta_prev, vc, wc):\n change_t = self._change_t\n theta = theta_prev\n # Jacobian of ut at xt-1\n Gt = np.array([\n [1, 0, -vc/wc*cos(theta) + vc/wc*cos(theta + wc*change_t)],\n [0, 1, -vc/wc*sin(theta) + vc/wc*sin(theta + wc*change_t)],\n [0, 0, 1]])\n # Jacobian to map noise in control space to state space\n Vt = np.array([\n [(-sin(theta) + sin(theta + wc*change_t))/wc, vc*(sin(theta)-sin(theta + wc*change_t))/(wc**2) + (vc*cos(theta + wc*change_t)*change_t)/wc],\n [(-cos(theta) + cos(theta + wc*change_t))/wc, vc*(cos(theta)-cos(theta + wc*change_t))/(wc**2) + (vc*sin(theta + wc*change_t)*change_t)/wc],\n [0, change_t]])\n\n Mt = np.array([\n [ALPHA1*vc**2 + ALPHA2*wc**2, 0],\n [0, ALPHA3*vc**2 + ALPHA4*wc**2]\n ])\n\n self.mean_belief = self.mean_belief + np.array([\n [-vc/wc*sin(theta) + vc/wc*sin(theta + wc*change_t)],\n [vc/wc*cos(theta) - vc/wc*cos(theta + wc*change_t)],\n [wc*change_t]\n ])\n\n self.covariance_belief = Gt @ self.covariance_belief @ Gt.T + Vt @ Mt @ Vt.T\n\n def measurement_step(self, true_state):\n Qt = self.Qt\n for feature in self.all_features:\n f_x = feature[0]\n f_y = feature[1]\n mean_x = self.mean_belief[0]\n mean_y = self.mean_belief[1]\n mean_theta = self.mean_belief[2]\n # Range and bearing from mean belief\n q = (f_x - mean_x)**2 + (f_y - mean_y)**2\n zti = np.array([\n [np.sqrt(q)],\n [np.arctan2((f_y - mean_y), (f_x - mean_x)) - mean_theta]]).reshape((2,1))\n measurement = simulate_measurement(true_state, f_x, 
f_y)\n\n Ht = np.array([\n [-(f_x - mean_x)/np.sqrt(q), -(f_y - mean_y)/np.sqrt(q), np.array([0])],\n [(f_y - mean_y)/q, -(f_x - mean_x)/q, np.array([-1])]]).reshape((2,3))\n covariance_belief = self.covariance_belief\n mean_belief = self.mean_belief\n St = Ht @ covariance_belief @ Ht.T + Qt\n Kt = covariance_belief @ Ht.T @ np.linalg.inv(St)\n self.mean_belief = mean_belief + Kt @ (measurement - zti)\n self.covariance_belief = (np.eye(len(Kt)) - Kt @ Ht) @ covariance_belief\n self.kt = Kt\n #pzt = np.linalg.det(2*pi*St)**(-1/2) @ exp(-1/2*(zti - measurement[index]).T @ np.linalg.inv(St) @ (zti - measurement[index]))\n\ndef simulate_measurement(true_state, f_x, f_y):\n true_x = true_state[0]\n true_y = true_state[1]\n true_theta = true_state[2]\n q = (f_x - true_x)**2 + (f_y - true_y)**2\n zt = np.array([\n [np.sqrt(q)],\n [np.arctan2((f_y - true_y), (f_x - true_x)) - true_theta]]).reshape((2,1))\n return zt + np.vstack((range_measurement_noise(), bearing_measurement_noise()))\n\ndef range_measurement_noise():\n return np.random.normal(0, STD_DEV_LOCATION_RANGE)\n\ndef bearing_measurement_noise():\n return np.random.normal(0, STD_DEV_LOCATION_BEARING)"
] | [
[
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.eye",
"numpy.arctan2",
"numpy.random.normal",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stoman/CompetitiveProgramming | [
"0000b64369b50e31c6f48939e837bdf6cece8ce4"
] | [
"problems/predictingofficespaceprice/submissions/accepted/stefan2.py"
] | [
"#!/usr/bin/env python2\n\n#Author: Stefan Toman\n\nimport itertools\nimport numpy as np\nfrom operator import mul\nfrom sklearn.linear_model import LinearRegression\n\nif __name__ == '__main__':\n #read input\n f, n = map(int, raw_input().split())\n X = []\n y = []\n for _ in range(n):\n line = raw_input().split()\n X.append([float(x) for x in line[:-1]])\n y.append([float(line[-1])])\n q = int(raw_input())\n Xt = []\n for _ in range(q):\n Xt.append([float(x) for x in raw_input().split()])\n #add new features as monomials of degree <= 3\n X = np.array(X)\n Xt = np.array(Xt)\n for i in range(2, 4):\n for var in itertools.product(range(f), repeat=i):\n X = np.hstack((X, reduce(mul, [X[:, j] for j in var]).reshape(-1, 1)))\n Xt = np.hstack((Xt, reduce(mul, [Xt[:, j] for j in var]).reshape(-1, 1)))\n #use sklearn to compute output\n for yt in LinearRegression().fit(X, y).predict(Xt):\n print(yt[0])\n "
] | [
[
"numpy.array",
"sklearn.linear_model.LinearRegression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JoseLuisRojasAranda/tfmodels | [
"56dce0236f0cc03dd7031aecf305d470c9fb97a9",
"56dce0236f0cc03dd7031aecf305d470c9fb97a9"
] | [
"src/datasets/Fruits360/f360_dataset.py",
"src/datasets/coco/coco_to_tfrecord.py"
] | [
"import tensorflow as tf\nimport cv2\nfrom glob import glob\nimport sys\nimport os\nfrom os import path\nimport json\nimport random\n\nfrom datasets.datasets_features import bytes_feature\n\n# Metodo que regresa el dataset de f360 ya procesado a tfrecord\n# Los data set tiene el formato:\n# x: tensor con la imagen normalizada\n# y: tensor con onehot encoding de la categoria\n# Returns:\n# train_data: Dataset de entrenameinto\n# test_data: Dataset de pruebas\ndef f360_load_dataset(path=None, resize=None, num_classes=None):\n train_path = \"f360_train.tfrecord\"\n test_path = \"f360_test.tfrecord\"\n\n if path == None:\n path = \"\"\n\n train_raw_data = tf.data.TFRecordDataset(path+train_path)\n test_raw_data = tf.data.TFRecordDataset(path+test_path)\n\n _format = {\n \"x\": tf.io.FixedLenFeature([], tf.string),\n \"y\": tf.io.FixedLenFeature([], tf.string)\n }\n\n def _parse_example(example):\n ex = tf.io.parse_single_example(example, _format)\n x = tf.io.parse_tensor(ex[\"x\"], tf.float32)\n y = tf.io.parse_tensor(ex[\"y\"], tf.float32)\n y = tf.reshape(y, [-1])\n\n data_dict = {\n \"x\": x,\n \"y\": y\n }\n\n return x, y\n\n train_data = train_raw_data.map(_parse_example)\n test_data = test_raw_data.map(_parse_example)\n\n def _set_dataset_shape(x, y):\n x.set_shape([100, 100, 3])\n\n return x, y\n\n train_data = train_data.map(_set_dataset_shape)\n test_data = test_data.map(_set_dataset_shape)\n\n if resize != None:\n def _resize_dataset(x, y):\n x = tf.image.resize(x, [resize, resize])\n\n return x, y\n\n train_data = train_data.map(_resize_dataset)\n test_data = test_data.map(_resize_dataset)\n\n with open(path+\"dataset_info.json\", \"r\") as data:\n info = json.load(data)\n\n\n return train_data, test_data, info\n\n# Metodo que convierte el dataset de Fruits 360 a tfrecord, para despues usarlo\n# con el Dataset API de tensorflow\n# Args:\n# training_path: el path al dataset de training\n# test_path: el path al dataset de pruebas\n# num_imgs: numero de images 
a obtener, -1 para todas\n# result_path: el path donde se guarda el resultado\ndef f360_create_dataset(training_path=None, test_path=None, num_imgs=-1,\n result_path=None, delta=1, offset=0):\n # Crea la carpeta por si no existe donde se va a guardar el resultado\n if not path.exists(result_path):\n os.makedirs(result_path)\n\n process_cats = [\"Apple Golden 1\", \"Banana\", \"Orange\"]\n \"\"\"\n process_cats = [\"Apple Braeburn\", \"Apple Golden 1\", \"Avocado\", \"Lemon\",\n \"Limes\", \"Lychee\", \"Mandarine\", \"Banana\", \"Onion White\", \"Onion White\",\n \"Pear\", \"Orange\", \"Pineapple\", \"Potato White\", \"Strawberry\", \"Tomato 4\"]\n \"\"\"\n\n onehot_depth = len(process_cats)\n onehot_dict = { }\n for i in range(len(process_cats)):\n cat = process_cats[i]\n onehot_dict[cat] = i\n\n # Obtiene todas las categorias que existen\n cats = [x[1] for x in os.walk(training_path)][0]\n\n # Writer al tfrecord\n train_writer = tf.io.TFRecordWriter(result_path+\"f360_train.tfrecord\")\n test_writer = tf.io.TFRecordWriter(result_path+\"f360_test.tfrecord\")\n\n train_size = 0\n test_size = 0\n total_train_size = 0\n total_test_size = 0\n\n categories_size = { }\n\n # funcion que escribe una imagen al tfrecord\n def encode_image_info(image, category, writer):\n # Convierte la imagen a un tensor y lo normaliza \n image_tensor = tf.convert_to_tensor(image)\n image_tensor /= 255\n\n category = tf.one_hot([onehot_dict[category]], onehot_depth)\n\n # Genera los features para el example\n data = {\n \"x\": bytes_feature(tf.io.serialize_tensor(image_tensor)),\n \"y\": bytes_feature(tf.io.serialize_tensor(category))\n }\n\n example = tf.train.Example(features=tf.train.Features(feature=data))\n writer.write(example.SerializeToString())\n\n print(\"[INFO] Writing dataset to tfrecord\")\n # itera sobre todas las categorias a procesar\n for cat in process_cats:\n # si la categoria existe\n if cat in cats:\n print(\"[INFO] Writing {}...\".format(cat))\n train_size = test_size = 
0\n # obtiene los paths\n train_img_path = glob(training_path+cat+\"/*.jpg\")\n test_img_path = glob(test_path+cat+\"/*.jpg\")\n\n # Ordena los paths\n train_img_path = sorted(train_img_path)\n test_img_path = sorted(test_img_path)\n\n # el numero de imagenes a que se van a ciclar\n n_train = n_test = num_imgs\n if n_train == -1:\n n_train = len(train_img_path)\n n_test = len(test_img_path)\n\n\n i = offset\n j = 0\n total = 0\n # escribe training images\n \"\"\"\n for i in range(n_train):\n img_path = train_img_path[i]\n image = cv2.imread(img_path)\n encode_image_info(image, cat, train_writer)\n train_size += 1\n \"\"\"\n while total < n_train:\n img_path = train_img_path[i]\n image = cv2.imread(img_path)\n encode_image_info(image, cat, train_writer)\n train_size += 1\n #i += random.randint(10, 20)\n i += delta\n if i >= n_train: i = i - n_train\n total += delta\n\n # escribe test images\n for j in range(n_test):\n img_path = test_img_path[j]\n image = cv2.imread(img_path)\n encode_image_info(image, cat, test_writer)\n test_size += 1\n\n categories_size[cat] = (train_size, test_size)\n\n total_train_size += train_size\n total_test_size += test_size\n\n train_writer.close()\n test_writer.close()\n\n dataset_info = {\n \"name\": \"Fruits 360 dataset\",\n \"num_classes\": len(process_cats),\n \"delta\": delta,\n \"offset\": offset,\n \"categories\": process_cats,\n \"train_size\": total_train_size,\n \"test_size\": total_test_size,\n \"categories_size\": categories_size\n }\n\n # Escribe el info del dataset\n with open(result_path+\"dataset_info.json\", \"w\") as writer:\n json.dump(dataset_info, writer, indent=4)\n\n",
"#\n# Script que convierte una subparte del dataset de COCO a un archivo\n# TFRecord para obtener mejor desempeño a la hora de entrenamiento.\n# El formato del archivo es el siguiente:\n# image_string: cadena conteniendo la imagen\n# []:\n# category: string\n# x: int\n# y: int\n# h: int\n# w: int\nimport os\nimport glob\nimport json\nimport cv2\nimport tensorflow as tf\nimport argparse\nfrom datasets_features import *\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-f\", \"--folder\", required=True, \n help=\"path to dataset folder\")\n ap.add_argument(\"-r\", \"--result\", required=True,\n help=\"path were the TFRecord will be saved\")\n args = vars(ap.parse_args())\n path = args[\"folder\"]\n res = args[\"result\"]\n\n coco_tfrecord(path, res)\n\ndef coco_tfrecord(path, res):\n print(\"[INFO] Loading images paths\")\n img_dirs = glob.glob(path + \"/*.jpg\")\n num_imgs = len(img_dirs)\n\n if num_imgs > 0:\n print(\"[INFO] Serializing dataset\")\n with tf.io.TFRecordWriter(res) as f:\n for i in range(num_imgs):\n print(\"Copied {} of {}\".format(i+1, num_imgs))\n example = _image_example(img_path=img_dirs[i], ann_path=img_dirs[i]+\".json\") \n f.write(example.SerializeToString())\n\n return None\n\n# Creates a tf.Example from a image with annotation\ndef _image_example(img_path=None, ann_path=None):\n image = cv2.imread(img_path)\n image_str = cv2.imencode(\".jpg\", image)[1].tostring()\n with open(ann_path) as json_text:\n ann = json.loads(json_text.read())\n\n cat_s = []\n x_s = []\n y_s = []\n w_s = []\n h_s = []\n for bbox in ann[\"bboxes\"]:\n cat_s.append(str.encode(bbox[\"category_id\"]))\n x_s.append(bbox[\"center_x\"])\n y_s.append(bbox[\"center_y\"])\n w_s.append(bbox[\"width\"])\n h_s.append(bbox[\"height\"])\n\n data = {\n \"img/filename\": bytes_feature(str.encode(ann[\"filename\"])),\n \"img/width\": int64_feature(ann[\"width\"]),\n \"img/height\": int64_feature(ann[\"height\"]),\n \"img/str\": bytes_feature(image_str),\n 
\"img/bboxes/category\": bytes_list_feature(cat_s),\n \"img/bboxes/x\": float_list_feature(x_s),\n \"img/bboxes/y\": float_list_feature(y_s),\n \"img/bboxes/width\": float_list_feature(w_s),\n \"img/bboxes/height\": float_list_feature(h_s)\n\n }\n\n return tf.train.Example(features=tf.train.Features(feature=data))\n\n\n\nmain()\n"
] | [
[
"tensorflow.io.TFRecordWriter",
"tensorflow.convert_to_tensor",
"tensorflow.data.TFRecordDataset",
"tensorflow.io.parse_single_example",
"tensorflow.reshape",
"tensorflow.io.FixedLenFeature",
"tensorflow.io.serialize_tensor",
"tensorflow.io.parse_tensor",
"tensorflow.image.resize",
"tensorflow.one_hot",
"tensorflow.train.Features"
],
[
"tensorflow.io.TFRecordWriter",
"tensorflow.train.Features"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tomasstolker/AMICAL | [
"c9bbf8e4a468313efff3b349fffea7648c411a51"
] | [
"amical/_cli/commands/clean.py"
] | [
"import os\nfrom datetime import datetime\nfrom glob import glob\nfrom pathlib import Path\n\nfrom astropy.io import fits\nfrom matplotlib import pyplot as plt\nfrom tabulate import tabulate\nfrom termcolor import cprint\nfrom tqdm import tqdm\n\nimport amical\n\n\ndef _select_data_file(args, process):\n \"\"\"Show report with the data found and allow to select one to be treated.\"\"\"\n l_file = sorted(glob(\"%s/*.fits\" % args.datadir))\n\n if len(l_file) == 0:\n print(\"No fits files found in %s, check --datadir.\" % args.datadir)\n return 1\n\n headers = [\"FILENAME\", \"TARGET\", \"DATE\", \"INSTRUM\", \"INDEX\"]\n\n index_file = []\n d = []\n for i, f in enumerate(l_file):\n with fits.open(f) as hdu:\n hdr = hdu[0].header\n target = hdr.get(\"OBJECT\", None)\n date = hdr.get(\"DATE-OBS\", None)\n ins = hdr.get(\"INSTRUME\", None)\n index_file.append(i)\n filename = f.split(\"/\")[-1]\n d.append([filename, target, date, ins, i])\n\n print(tabulate(d, headers=headers))\n\n if args.file >= 0:\n choosen_index = args.file\n else:\n choosen_index = int(input(\"\\nWhich file to %s?\\n\" % process))\n\n try:\n filename = l_file[choosen_index]\n except IndexError:\n print(\n \"Selected index (%i) not valid (only %i files found).\"\n % (choosen_index, len(l_file))\n )\n raise SystemExit\n else:\n with fits.open(filename) as hdul:\n hdr = hdul[0].header\n return filename, hdr\n\n\ndef perform_clean(args):\n \"\"\"Clean the data with AMICAL.\"\"\"\n cprint(\"---- AMICAL clean process ----\", \"cyan\")\n\n clean_param = {\n \"isz\": args.isz,\n \"r1\": args.r1,\n \"dr\": args.dr,\n \"apod\": args.apod,\n \"window\": args.window,\n \"f_kernel\": args.kernel,\n }\n\n if not os.path.exists(args.datadir):\n print(\n \"%s directory not found, check --datadir. 
AMICAL look for data only in this specified directory.\"\n % args.datadir\n )\n return 1\n\n l_file = sorted(glob(\"%s/*.fits\" % args.datadir))\n if len(l_file) == 0:\n print(\"No fits files found in %s, check --datadir.\" % args.datadir)\n return 1\n\n if not args.all:\n filename, hdr = _select_data_file(args, process=\"clean\")\n\n if args.check:\n amical.show_clean_params(filename, **clean_param)\n plt.show(block=True)\n return 0\n\n if not os.path.exists(args.outdir):\n os.mkdir(args.outdir)\n\n clean_param[\"clip\"] = args.clip\n clean_param[\"sky\"] = args.sky\n\n if args.all:\n # Clean all files in --datadir\n for f in tqdm(l_file, ncols=100, desc=\"# files\"):\n hdr = fits.open(f)[0].header\n hdr[\"HIERARCH AMICAL step\"] = \"CLEANED\"\n cube = amical.select_clean_data(f, **clean_param, display=True)\n f_clean = os.path.join(args.outdir, Path(f).stem + \"_cleaned.fits\")\n fits.writeto(f_clean, cube, header=hdr, overwrite=True)\n else:\n # Or clean just the specified file (in --datadir)\n hdr[\"HIERARCH AMICAL step\"] = \"CLEANED\"\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n hdr[\"HIERARCH AMICAL time\"] = dt_string\n for k in clean_param:\n hdr[\"HIERARCH AMICAL params %s\" % k] = clean_param[k]\n cube = amical.select_clean_data(filename, **clean_param, display=True)\n if args.plot:\n plt.show()\n f_clean = os.path.join(args.outdir, Path(filename).stem + \"_cleaned.fits\")\n fits.writeto(f_clean, cube, header=hdr, overwrite=True)\n return 0\n"
] | [
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kristianwiklund/AOC2019 | [
"a98affaccd53ca4ea2d3a8c3fa125680f1e8cc08"
] | [
"2018/7/7.py"
] | [
"import networkx as nx\nimport matplotlib.pyplot as plt\n\nG = nx.DiGraph()\n\n#with open (\"shortinput.txt\") as fd:\nwith open (\"input.txt\") as fd:\n\n for line in fd:\n x = line.split(\" \")\n before = x[1]\n after = x[7]\n G.add_edge(before, after, weight=ord(after)-64)\n\nnx.draw(G, with_labels=True)\nplt.savefig(\"maze.png\")\nhelalistan=list(nx.lexicographical_topological_sort(G))\nprint(\"7A :\"+\"\".join(helalistan))\n\n# ---------------------\n\n#ACHOQRXSEKUGMYIWDZLNBFTJVP\n\ntime=0\nworkers = [0,0,0,0,0,0,0,0,0,0]\ndoing = [None, None,None,None,None,None,None,None,None]\n\n\nwhile list(G.nodes()) != []:\n\n for i in range(0,6):\n\n if workers[i] <= 0:\n # finish what was done, then pull something\n if doing[i]:\n# print (\"Worker \"+str(i)+\" is done with \"+doing[i])\n G.remove_node(doing[i])\n doing[i] = None\n \n for j in helalistan:\n #print (\"Trying to pull node \"+j)\n if not j in doing:\n #print (\"Nobody is working on \"+j)\n if G.has_node(j) and list(G.predecessors(j)) == []:\n # print (\"Worker \"+str(i)+\" pulls node \"+j)\n doing[i] = j\n workers[i] = 60+ord(j)-65\n break\n \n else:\n workers[i] = workers[i] - 1\n\n # print(\"Tick: \"+str(time) + \" working on \"+str(doing))\n time=time+1\n\n\nprint(\"Total time for assembly: \"+str(time-1))\n\n \n \n \n \n \n"
] | [
[
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WadhwaniAI/ESTRNN | [
"4af8d53b0ebb1655c40aaf4f6950904580a34aa2"
] | [
"data/anthro.py"
] | [
"import os\nimport random\nfrom os.path import join, basename, dirname\n\nimport cv2\nimport numpy as np\nimport torch\nfrom glob import glob\nimport ipdb\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\n\nfrom utils import normalize, Crop, Flip, ToTensor\n\n\nclass AnthroDeblurDataset(Dataset):\n \"\"\"\n Structure of self_.records:\n seq:\n frame:\n path of images -> {'Blur': <path>, 'Sharp': <path>}\n \"\"\"\n\n def __init__(self, path, frames, future_frames, past_frames, crop_size=(256, 256), data_format='RGB',\n centralize=True, normalize=True):\n assert frames - future_frames - past_frames >= 1\n\n self.frames = frames\n self.num_ff = future_frames\n self.num_pf = past_frames\n self.data_format = data_format\n self.W = None\n self.H = None\n self.crop_h, self.crop_w = crop_size\n self.normalize = normalize\n self.centralize = centralize\n self.transform = transforms.Compose([Crop(crop_size), ToTensor()])\n self._seq_length = 200\n self._samples = self._generate_samples(path, data_format)\n\n def _generate_samples(self, dataset_path, data_format):\n samples = list()\n records = dict()\n seq = basename(dataset_path)\n\n records[seq] = list()\n frames = sorted(glob(join(dataset_path, '*.jpg')))\n for frame in frames[:self._seq_length]:\n sample = dict()\n sample['Blur'] = frame\n sample['Sharp'] = frame\n records[seq].append(sample)\n\n self.H, self.W, _ = cv2.imread(frame).shape\n\n\n for seq_records in records.values():\n temp_length = len(seq_records) - (self.frames - 1)\n if temp_length <= 0:\n raise IndexError('Exceed the maximum length of the video sequence')\n for idx in range(temp_length):\n samples.append(seq_records[idx:idx + self.frames])\n\n\n return samples\n\n def __getitem__(self, item):\n top = random.randint(0, self.H - self.crop_h)\n left = random.randint(0, self.W - self.crop_w)\n flip_lr = random.randint(0, 1)\n flip_ud = random.randint(0, 1)\n sample = {'top': top, 'left': left, 'flip_lr': flip_lr, 
'flip_ud': flip_ud}\n\n blur_imgs, sharp_imgs = [], []\n for sample_dict in self._samples[item]:\n blur_img, sharp_img = self._load_sample(sample_dict, sample)\n blur_imgs.append(blur_img)\n sharp_imgs.append(sharp_img)\n sharp_imgs = sharp_imgs[self.num_pf:self.frames - self.num_ff]\n return [torch.cat(item, dim=0) for item in [blur_imgs, sharp_imgs]]\n\n def _load_sample(self, sample_dict, sample):\n if self.data_format == 'RGB':\n sample['image'] = cv2.imread(sample_dict['Blur'])\n sample['label'] = cv2.imread(sample_dict['Sharp'])\n else:\n raise NotImplementedError\n # elif self.data_format == 'RAW':\n # sample['image'] = cv2.imread(sample_dict['Blur'], -1)[..., np.newaxis].astype(np.int32)\n # sample['label'] = cv2.imread(sample_dict['Sharp'], -1)[..., np.newaxis].astype(np.int32)\n\n\n sample = self.transform(sample)\n val_range = 2.0 ** 8 - 1 if self.data_format == 'RGB' else 2.0 ** 16 - 1\n blur_img = normalize(sample['image'], centralize=self.centralize, normalize=self.normalize, val_range=val_range)\n sharp_img = normalize(sample['label'], centralize=self.centralize, normalize=self.normalize, val_range=val_range)\n\n return blur_img, sharp_img\n\n def __len__(self):\n return len(self._samples)\n\n\nclass Dataloader:\n def __init__(self, para, device_id, ds_type='train'):\n path = join(para.data_root, para.dataset)\n frames = para.frames\n dataset = AnthroDeblurDataset(path, frames, para.future_frames, para.past_frames, para.patch_size, para.data_format,\n para.centralize, para.normalize)\n gpus = para.num_gpus\n bs = para.batch_size\n ds_len = len(dataset)\n if para.trainer_mode == 'ddp':\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset,\n num_replicas=para.num_gpus,\n rank=device_id\n )\n self.loader = DataLoader(\n dataset=dataset,\n batch_size=para.batch_size,\n shuffle=False,\n num_workers=para.threads,\n pin_memory=True,\n sampler=sampler,\n drop_last=True\n )\n loader_len = np.ceil(ds_len / gpus)\n self.loader_len = 
int(np.ceil(loader_len / bs) * bs)\n\n elif para.trainer_mode == 'dp':\n self.loader = DataLoader(\n dataset=dataset,\n batch_size=para.batch_size,\n shuffle=True,\n num_workers=para.threads,\n pin_memory=True,\n drop_last=True\n )\n self.loader_len = int(np.ceil(ds_len / bs) * bs)\n\n def __iter__(self):\n return iter(self.loader)\n\n def __len__(self):\n return self.loader_len\n\n\nif __name__ == '__main__':\n from para import Parameter\n\n para = Parameter().args\n para.data_format = 'RGB'\n para.data_root = '/home/users/aditya/projects/ESTRNN/data/'\n para.dataset = 'anthro/358129091084785_19032020105115/video_1_baby_chessboard_ruler_4046788426114666387/'\n dataloader = Dataloader(para, 0)\n for x, y in dataloader:\n print(x.shape, y.shape)\n break\n print(x.type(), y.type())\n print(np.max(x.numpy()), np.min(x.numpy()))\n print(np.max(y.numpy()), np.min(y.numpy()))\n"
] | [
[
"numpy.ceil",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nagomiso/komono | [
"3158dc14ebaee724defe63d54c214d40065558d7"
] | [
"tests/test_reduce_memory.py"
] | [
"import pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal, assert_series_equal\n\nimport komono.pandas._reduce_memory as rd\n\n\[email protected]\ndef base_data():\n return {\n \"int8\": [-128, 127],\n \"int16\": [-129, 127],\n \"Int8\": [None, 127],\n \"Str\": [\"foo\", \"bar\"],\n }\n\n\[email protected]\ndef base_dtype():\n return {\n \"int8\": \"int64\",\n \"int16\": \"int64\",\n \"Int8\": \"Int64\",\n \"Str\": \"string\",\n }\n\n\[email protected]\ndef base_dataframe(base_data, base_dtype) -> pd.DataFrame:\n return pd.DataFrame.from_dict(base_data).astype(base_dtype)\n\n\[email protected](\n \"min_,max_,expected_dtype\",\n [\n (-128, 127, \"int8\"),\n (-128, 128, \"int16\"),\n (-129, 127, \"int16\"),\n (-129, 128, \"int16\"),\n (-32_768, 32_767, \"int16\"),\n (-32_768, 32_768, \"int32\"),\n (-32_769, 32_767, \"int32\"),\n (-32_769, 32_768, \"int32\"),\n (-2_147_483_648, 2_147_483_647, \"int32\"),\n (-2_147_483_648, 2_147_483_648, \"int64\"),\n (-2_147_483_649, 2_147_483_647, \"int64\"),\n (-2_147_483_649, 2_147_483_648, \"int64\"),\n ],\n)\ndef test_reduce_integer_series_not_nullable(min_, max_, expected_dtype):\n series = pd.Series([min_, max_], dtype=\"int64\")\n dtype = str(series.dtype)\n expected = pd.Series([min_, max_], dtype=expected_dtype)\n actual = rd._reduce_integer_series(series, dtype=dtype)\n assert_series_equal(actual, expected)\n\n\[email protected](\n \"min_,mid,max_,expected_dtype\",\n [\n (-128, None, 127, \"Int8\"),\n (-128, None, 128, \"Int16\"),\n (-129, None, 127, \"Int16\"),\n (-129, None, 128, \"Int16\"),\n (-32_768, None, 32_767, \"Int16\"),\n (-32_768, None, 32_768, \"Int32\"),\n (-32_769, None, 32_767, \"Int32\"),\n (-32_769, None, 32_768, \"Int32\"),\n (-2_147_483_648, None, 2_147_483_647, \"Int32\"),\n (-2_147_483_648, None, 2_147_483_648, \"Int64\"),\n (-2_147_483_649, None, 2_147_483_647, \"Int64\"),\n (-2_147_483_649, None, 2_147_483_648, \"Int64\"),\n ],\n)\ndef 
test_reduce_integer_series_nullable(min_, mid, max_, expected_dtype):\n series = pd.Series([min_, mid, max_], dtype=\"Int64\")\n dtype = str(series.dtype)\n expected = pd.Series([min_, mid, max_], dtype=expected_dtype)\n actual = rd._reduce_integer_series(series, dtype=dtype)\n assert_series_equal(actual, expected)\n\n\[email protected](\n \"min_,max_,expected_dtype\",\n [\n (-65500.0, 65500.0, \"float16\"),\n (-65500.0, 65600.0, \"float32\"),\n (-65600.0, 65500.0, \"float32\"),\n (-65600.0, 65600.0, \"float32\"),\n (-3.4028e38, 3.4028e38, \"float32\"),\n (-3.4028235e38, 3.4028335e38, \"float64\"),\n (-3.4028335e38, 3.4028235e38, \"float64\"),\n (-3.4028335e38, 3.4028335e38, \"float64\"),\n ],\n)\ndef test_reduce_float_series(min_, max_, expected_dtype):\n series = pd.Series([min_, max_], dtype=\"float64\")\n expected = pd.Series([min_, max_], dtype=expected_dtype)\n actual = rd._reduce_float_series(series)\n assert_series_equal(actual, expected)\n\n\ndef test_reduce_memory_usage(base_data, base_dataframe):\n expected = pd.DataFrame.from_dict(data=base_data,).astype(\n {\n \"int8\": \"int8\",\n \"int16\": \"int16\",\n \"Int8\": \"Int8\",\n \"Str\": \"string\",\n }\n )\n actual = rd.reduce_memory_usage(base_dataframe, verbose=True)\n assert_frame_equal(actual, expected)\n"
] | [
[
"pandas.testing.assert_frame_equal",
"pandas.testing.assert_series_equal",
"pandas.Series",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
swyang50066/sun-jupiter-earth-orbit | [
"c50012ff1a187485b717d86a24c25cfe6edd78a1"
] | [
"source/force.py"
] | [
"import numpy as np\n\nfrom allvar import *\n\n\ndef _distance(r1, r2):\n \"\"\"Return Euclidean _distance between positions\"\"\"\n return np.sqrt(np.sum((r1 - r2)**2.))\n\n\ndef drdt(r, v):\n \"\"\"Return position derivative\n\n :param r: shape: (x_earth, y_earth, x_jupiter, y_jupiter))\n :param v: shape: (vx_earth, vy_earth, vx_jupiter, vy_jupiter) \n :return: velocities\n \"\"\"\n return v\n\n\ndef dvdt(r, v, eps=1.e-20):\n \"\"\"Return position derivative\n\n Central star have fixed position at (0, 0)\n\n :param r: shape: (x_earth, y_earth, x_jupiter, y_jupiter)\n :param v: shape: (vx_earth, vy_earth, vx_jupiter, vy_jupiter)\n :return: accelerations\n \"\"\"\n # Geometric measurements\n r_se, r_sj, r_ej = r[:2], r[2:], r[2:] - r[:2]\n dist_se = _distance((0, 0), r_se)\n dist_sj = _distance((0, 0), r_sj)\n dist_ej = _distance(r_se, r_sj)\n\n theta_se = np.math.atan(np.abs(r_se[1])/(np.abs(r_se[0]) + eps))\n theta_sj = np.math.atan(np.abs(r_sj[1])/(np.abs(r_sj[0]) + eps))\n theta_ej = np.math.atan(np.abs(r_ej[1])/(np.abs(r_ej[0]) + eps))\n \n # Unit force functionals\n const_se = GG*(EARTH_MASS/SOLAR_MASS)\n f_se = -np.sign(r_se)*const_se*np.array(\n [ \n np.cos(theta_se)/(dist_se + eps)**2.,\n np.sin(theta_se)/(dist_se + eps)**2.\n ]\n )\n const_sj = GG*(JUPITER_MASS/SOLAR_MASS)\n f_sj = -np.sign(r_sj)*const_sj*np.array(\n [\n np.cos(theta_sj)/(dist_sj + eps)**2.,\n np.sin(theta_sj)/(dist_sj + eps)**2.\n ]\n )\n const_ej = GG*(EARTH_MASS*JUPITER_MASS/SOLAR_MASS**2.)\n f_ej = -np.sign(r_ej)*const_ej*np.array(\n [\n np.cos(theta_ej)/(dist_ej + eps)**2.,\n np.sin(theta_ej)/(dist_ej + eps)**2.\n ]\n )\n \n return np.hstack([\n (f_se - f_ej)/(EARTH_MASS/SOLAR_MASS),\n (f_sj + f_ej)/(JUPITER_MASS/SOLAR_MASS),\n ])\n"
] | [
[
"numpy.hstack",
"numpy.abs",
"numpy.cos",
"numpy.sin",
"numpy.sign",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
haojiepan1/CrossWOZ | [
"6d7b4c4cfb73a528b76074764687906abecc90b6",
"6d7b4c4cfb73a528b76074764687906abecc90b6",
"6d7b4c4cfb73a528b76074764687906abecc90b6"
] | [
"tests/test_end2end.py",
"convlab2/nlg/template/multiwoz/evaluate.py",
"convlab2/policy/vhus/camrest/vhus.py"
] | [
"from convlab2.nlu.svm.multiwoz import SVMNLU\nfrom convlab2.nlu.jointBERT.multiwoz import BERTNLU\nfrom convlab2.nlu.milu.multiwoz import MILU\nfrom convlab2.dst.rule.multiwoz import RuleDST\nfrom convlab2.policy.rule.multiwoz import RulePolicy\nfrom convlab2.nlg.template.multiwoz import TemplateNLG\nfrom convlab2.dialog_agent import PipelineAgent, BiSession\nfrom convlab2.evaluator.multiwoz_eval import MultiWozEvaluator\nfrom pprint import pprint\nimport random\nimport numpy as np\nimport torch\n\nsys_nlu = BERTNLU(mode='all', config_file='multiwoz_all.json',\n model_file='https://tatk-data.s3-ap-northeast-1.amazonaws.com/bert_multiwoz_all.zip')\n# sys_nlu = SVMNLU(mode='sys')\n# simple rule DST\nsys_dst = RuleDST()\n# rule policy\nsys_policy = RulePolicy(character='sys')\n# template NLG\nsys_nlg = TemplateNLG(is_user=False)\n# assemble\nsys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, 'sys')\n\n# user_nlu = sys_nlu\n# user_nlu = SVMNLU(mode='all')\nuser_nlu = MILU(model_file=\"https://convlab.blob.core.windows.net/models/milu.tar.gz\")\n# not use dst\nuser_dst = None\n# rule policy\nuser_policy = RulePolicy(character='usr')\n# template NLG\nuser_nlg = TemplateNLG(is_user=True)\n# assemble\nuser_agent = PipelineAgent(user_nlu, None, user_policy, user_nlg, 'user')\n\nevaluator = MultiWozEvaluator()\nsess = BiSession(sys_agent=sys_agent, user_agent=user_agent, kb_query=None, evaluator=evaluator)\n\nrandom.seed(20200131)\nnp.random.seed(20190827)\ntorch.manual_seed(20200131)\nsys_response = ''\nsess.init_session()\nprint('init goal:')\npprint(sess.evaluator.goal)\nprint('-'*50)\nfor i in range(40):\n sys_response, user_response, session_over, reward = sess.next_turn(sys_response)\n print('user:', user_response)\n print('sys:', sys_response)\n print()\n if session_over is True:\n print('task complete:', user_policy.policy.goal.task_complete())\n print('task success:', sess.evaluator.task_success())\n print('book rate:', sess.evaluator.book_rate())\n 
print('inform precision/recall/f1:', sess.evaluator.inform_F1())\n print('-'*50)\n print('final goal:')\n pprint(sess.evaluator.goal)\n print('='*100)\n break\n\ntotal_dialog = 10\nrandom.seed(20200131)\ngoal_seeds = [random.randint(1,100000) for _ in range(total_dialog)]\nprecision = 0\nrecall = 0\nf1 = 0\nsuc_num = 0\ncomplete_num = 0\nfor j in range(total_dialog):\n sys_response = ''\n random.seed(goal_seeds[0])\n np.random.seed(goal_seeds[0])\n torch.manual_seed(goal_seeds[0])\n goal_seeds.pop(0)\n sess.init_session()\n # print('init goal:')\n # pprint(sess.evaluator.goal)\n # print('-'*50)\n for i in range(40):\n sys_response, user_response, session_over, reward = sess.next_turn(\n sys_response)\n # print('user:', user_response)\n # print('sys:', sys_response)\n if session_over is True:\n if sess.evaluator.task_success() == 1:\n suc_num = suc_num+1\n if user_policy.policy.goal.task_complete():\n complete_num += 1\n print('task complete:', user_policy.policy.goal.task_complete())\n print('task success:', sess.evaluator.task_success())\n print('book rate:', sess.evaluator.book_rate())\n print('inform precision/recall/f1:', sess.evaluator.inform_F1())\n stats = sess.evaluator.inform_F1()\n if(stats[0] != None):\n precision = precision+stats[0]\n if(stats[1] != None):\n recall = recall+stats[1]\n if(stats[2] != None):\n f1 = f1+stats[2]\n else:\n suc_num = suc_num-1\n # print('-'*50)\n # print('final goal:')\n # pprint(sess.evaluator.goal)\n # print('='*100)\n break\nprint(\"complete number of dialogs/tot:\", complete_num/total_dialog)\nprint(\"success number of dialogs/tot:\", suc_num/total_dialog)\nprint(\"average precision:\", precision/total_dialog)\nprint(\"average recall:\", recall/total_dialog)\nprint(\"average f1:\", f1/total_dialog)",
"\"\"\"\nEvaluate NLG models on utterances of Multiwoz test dataset\nMetric: dataset level BLEU-4, slot error rate\nUsage: python evaluate.py [usr|sys|all]\n\"\"\"\nimport json\nimport random\nimport sys\nimport zipfile\n\nimport numpy as np\nimport torch\nfrom nltk.translate.bleu_score import corpus_bleu, SmoothingFunction\n\nfrom convlab2.nlg.template.multiwoz import TemplateNLG\n\nseed = 2019\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\n\ndef get_bleu4(dialog_acts, golden_utts, gen_utts):\n das2utts = {}\n for das, utt, gen in zip(dialog_acts, golden_utts, gen_utts):\n utt = utt.lower()\n gen = gen.lower()\n for da, svs in das.items():\n domain, act = da.split('-')\n if act == 'Request' or domain == 'general':\n continue\n else:\n for s, v in sorted(svs, key=lambda x: x[0]):\n if s == 'Internet' or s == 'Parking' or s == 'none' or v == 'none':\n continue\n else:\n v = v.lower()\n if (' ' + v in utt) or (v + ' ' in utt):\n utt = utt.replace(v, '{}-{}'.format(da, s), 1)\n if (' ' + v in gen) or (v + ' ' in gen):\n gen = gen.replace(v, '{}-{}'.format(da, s), 1)\n hash_key = ''\n for da in sorted(das.keys()):\n for s, v in sorted(das[da], key=lambda x: x[0]):\n hash_key += da + '-' + s + ';'\n das2utts.setdefault(hash_key, {'refs': [], 'gens': []})\n das2utts[hash_key]['refs'].append(utt)\n das2utts[hash_key]['gens'].append(gen)\n # pprint(das2utts)\n refs, gens = [], []\n for das in das2utts.keys():\n for gen in das2utts[das]['gens']:\n refs.append([s.split() for s in das2utts[das]['refs']])\n gens.append(gen.split())\n bleu = corpus_bleu(refs, gens, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=SmoothingFunction().method1)\n return bleu\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(\"usage:\")\n print(\"\\t python evaluate.py data_key\")\n print(\"\\t data_key=usr/sys/all\")\n sys.exit()\n data_key = sys.argv[1]\n if data_key=='all' or data_key=='usr':\n model_usr = TemplateNLG(is_user=True)\n if data_key=='all' or 
data_key=='sys':\n model_sys = TemplateNLG(is_user=False)\n\n archive = zipfile.ZipFile('../../../../data/multiwoz/test.json.zip', 'r')\n test_data = json.load(archive.open('test.json'))\n\n dialog_acts = []\n golden_utts = []\n gen_utts = []\n gen_slots = []\n\n sen_num = 0\n sess_num = 0\n for no, sess in list(test_data.items()):\n sess_num+=1\n print('[%d/%d]' % (sess_num, len(test_data)))\n for i, turn in enumerate(sess['log']):\n if i % 2 == 0 and data_key == 'sys':\n continue\n elif i % 2 == 1 and data_key == 'usr':\n continue\n sen_num += 1\n model = model_usr if i%2==0 else model_sys\n dialog_acts.append(turn['dialog_act'])\n golden_utts.append(turn['text'])\n gen_utts.append(model.generate(turn['dialog_act']))\n\n bleu4 = get_bleu4(dialog_acts, golden_utts, gen_utts)\n\n print(\"Calculate bleu-4\")\n print(\"BLEU-4: %.4f\" % bleu4)\n\n print('Model on {} session {} sentences data_key={}'.format(len(test_data), sen_num, data_key))\n",
"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport torch\nfrom convlab2.task.camrest.goal_generator import GoalGenerator\nfrom convlab2.policy.vhus.camrest.usermanager import UserDataManager\nfrom convlab2.policy.vhus.usermodule import VHUS\nfrom convlab2.policy.vhus.vhus import UserPolicyVHUSAbstract\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nDEFAULT_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\nDEFAULT_ARCHIVE_FILE = os.path.join(DEFAULT_DIRECTORY, \"vhus_simulator_camrest.zip\")\n\nclass UserPolicyVHUS(UserPolicyVHUSAbstract):\n\n def __init__(self,\n archive_file=DEFAULT_ARCHIVE_FILE,\n model_file='https://tatk-data.s3-ap-northeast-1.amazonaws.com/vhus_simulator_camrest.zip'):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json'), 'r') as f:\n config = json.load(f)\n manager = UserDataManager()\n voc_goal_size, voc_usr_size, voc_sys_size = manager.get_voc_size()\n self.user = VHUS(config, voc_goal_size, voc_usr_size, voc_sys_size).to(device=DEVICE)\n self.goal_gen = GoalGenerator()\n self.manager = manager\n self.user.eval()\n\n self.load(archive_file, model_file, config['load'])\n"
] | [
[
"torch.manual_seed",
"numpy.random.seed"
],
[
"torch.manual_seed",
"numpy.random.seed"
],
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HenryJia/lightning-baselines3 | [
"10d1a0eed6136978204323250e37d49915a12e14"
] | [
"tests/on_policy_models/test_on_policy_model.py"
] | [
"from collections import OrderedDict\n\nimport pytest\n\nimport gym\nfrom gym import spaces\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch import distributions\n\nimport pytorch_lightning as pl\n\nfrom lightning_baselines3.on_policy_models.on_policy_model import OnPolicyModel\n\n\n\nclass DummyModel(OnPolicyModel):\n def __init__(self, *args, **kwargs):\n super(DummyModel, self).__init__(*args, **kwargs)\n\n if isinstance(self.action_space, spaces.Discrete):\n self.p = nn.Parameter(torch.ones(1, self.action_space.n) * 0.5)\n elif isinstance(self.action_space, spaces.Box):\n self.p = nn.Parameter(torch.ones(1, self.action_space.shape[0] * 2) * 0.5)\n else:\n raise Exception('Incompatible environment action space')\n\n\n def forward(self, x, **kwargs):\n p = self.p.expand(x.shape[0], self.p.shape[-1])\n if isinstance(self.action_space, spaces.Discrete):\n dist = distributions.Categorical(probs=F.softmax(p, dim=1))\n elif isinstance(self.action_space, spaces.Box):\n p = torch.chunk(p, 2, dim=1)\n dist = distributions.Normal(loc=p[0], scale=1 + p[1] ** 2)\n return dist, torch.ones_like(x)[:, :1]\n\n\n def predict(self, x, deterministic=True):\n p = self.p.expand(x.shape[0], self.p.shape[-1])\n if deterministic:\n if isinstance(self.action_space, spaces.Discrete):\n out = torch.max(p, dim=1)[1]\n elif isinstance(self.action_space, spaces.Box):\n out = torch.chunk(p, 2, dim=1)[0]\n else:\n if isinstance(self.action_space, spaces.Discrete):\n out = distributions.Categorical(probs=F.softmax(p, dim=1)).sample()\n elif isinstance(self.action_space, spaces.Box):\n p = torch.chunk(p, 2, dim=1)\n out = distributions.Normal(loc=p[0], scale=1 + p[1] ** 2).sample()\n return out.cpu().numpy()\n\n\n def training_step(self, x, batch_idx):\n loss = self(x.observations)[0].entropy().mean()\n self.log('loss', loss)\n return loss \n\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return 
optimizer\n\n\n\n\[email protected](\"env_id\", [\"CartPole-v1\", \"MountainCar-v0\", \"MountainCarContinuous-v0\"])\ndef test_on_policy_model(env_id):\n \"\"\"\n Check that environmnent integrated in Gym pass the test.\n\n :param env_id: (str)\n \"\"\"\n model = DummyModel(\n env_id,\n eval_env=env_id,\n buffer_length=512,\n num_rollouts=1,\n batch_size=32,\n epochs_per_rollout=10,\n num_eval_episodes=10,\n gamma=0.9,\n gae_lambda=0.95,\n use_sde=False,\n sde_sample_freq=-1,\n verbose=1,\n seed=1234)\n\n trainer = pl.Trainer(max_epochs=2, terminate_on_nan=True)\n trainer.fit(model)\n"
] | [
[
"torch.nn.functional.softmax",
"torch.ones",
"torch.max",
"torch.distributions.Normal",
"torch.chunk",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
golunovas/onnx-tensorflow | [
"b6340b3e66aa08af1ea4382e98257c2098177371"
] | [
"test/backend/test_node.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\nimport unittest\nimport numpy as np\nimport tensorflow as tf\nfrom onnx_tf.backend import run_node\nfrom onnx_tf.common import supports_device\nfrom onnx_tf.common.legacy import legacy_onnx_pre_ver, legacy_opset_pre_ver\nfrom onnx import helper\nfrom onnx import TensorProto\nfrom onnx import defs\n\n\nclass TestNode(unittest.TestCase):\n \"\"\" Tests for nodes\n \"\"\"\n\n def _get_rnd_float32(self, low=-1.0, high=1.0, shape=None):\n output = np.random.uniform(low, high, shape)\n if shape == None:\n return np.float32(output)\n else:\n return output.astype(np.float32)\n\n def _get_rnd_int(self, low, high=None, shape=None, dtype=np.int32):\n return np.random.randint(low, high, size=shape, dtype=dtype)\n\n def _elu(self, x):\n # f(x) = alpha * (exp(x) - 1.) for x < 0,\n # f(x) = x for x >= 0\n if x < 0.:\n return np.expm1(x)\n return x\n\n def _leaky_relu(self, x, alpha):\n # f(x) = alpha * x for x < 0,\n # f(x) = x for x >= 0\n if x < 0.:\n return alpha * x\n return x\n\n def test_abs(self):\n node_def = helper.make_node(\"Abs\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.abs(x))\n\n def test_acosh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Acosh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Acosh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.arccosh(x))\n\n def test_add(self):\n node_def = helper.make_node(\"Add\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 10, 5, 5])\n y = self._get_rnd_float32(shape=[10, 1, 1])\n output = run_node(node_def, [x, y])\n 
np.testing.assert_almost_equal(output[\"Z\"],\n np.add(x, y.reshape([1, 10, 1, 1])))\n\n # node_def = helper.make_node(\"Add\", [\"A\", \"B\"], [\"C\"], broadcast=1)\n # a = self._get_rnd([10, 10])\n # b = self._get_rnd([10, 10])\n # output = run_node(node_def, [a, b])\n # np.testing.assert_almost_equal(output[\"C\"], np.add(a, b))\n\n # node_def = helper.make_node(\"Add\", [\"A\", \"B\"], [\"C\"], broadcast=1)\n # a = self._get_rnd([10, 10])\n # b = self._get_rnd([10,])\n # output = run_node(node_def, [a, b])\n # np.testing.assert_almost_equal(output[\"C\"], np.add(a, b))\n\n def test_arg_max(self):\n # TODO: need to fix this test\n return\n for axis in [0, 1]:\n node_def = helper.make_node(\n \"ArgMax\", [\"data\"], [\"reduced\"], axis=axis, keepdims=0)\n data = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [data])\n np.testing.assert_almost_equal(output[\"reduced\"],\n np.argmax(data, axis=axis))\n\n def test_arg_min(self):\n # TODO: need to fix this test\n return\n for axis in [0, 1]:\n node_def = helper.make_node(\n \"ArgMin\", [\"data\"], [\"reduced\"], axis=axis, keepdims=0)\n data = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [data])\n np.testing.assert_almost_equal(output[\"reduced\"],\n np.argmin(data, axis=axis))\n\n def test_asinh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Asinh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Asinh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.arcsinh(x))\n\n def test_atanh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Atanh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Atanh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n 
np.testing.assert_almost_equal(output[\"Y\"], np.arctanh(x))\n\n def test_average_pool(self):\n # TODO: fix this test\n return\n device = \"CUDA\"\n if not supports_device(device):\n raise unittest.SkipTest(\n \"Backend doesn't support device {}\".format(device))\n shape = [1, 1, 40, 40]\n node_def = helper.make_node(\n \"AveragePool\", [\"X\"], [\"Y\"],\n kernel_shape=[1, 2],\n pads=[1, 1],\n strides=[1, 1])\n x = self._get_rnd_float32(shape=shape)\n output = run_node(node_def, [x], device=device)\n test_output = np.zeros(shape)\n for i1 in range(0, shape[0]):\n for i2 in range(0, shape[1]):\n for j1 in range(0, shape[2]):\n for j2 in range(0, shape[3]):\n test_output[i1][i2][j1][j2] = 0\n count = 0\n for k in range(j2, min(j2 + 2, shape[3])):\n test_output[i1][i2][j1][j2] += x[i1][i2][j1][k]\n count += 1\n test_output[i1][i2][j1][j2] /= count\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def _batch_normalization(self, x, mean, variance, bias, scale,\n variance_epsilon):\n inv = np.reciprocal(np.sqrt(variance + variance_epsilon))\n if scale is not None:\n inv *= scale\n return x * inv + (bias - mean * inv if bias is not None else -mean * inv)\n\n def test_batch_normalization(self):\n if legacy_opset_pre_ver(6):\n raise unittest.SkipTest(\"Backend doesn't support consumed flag\")\n node_def = helper.make_node(\n \"BatchNormalization\", [\"X\", \"scale\", \"bias\", \"mean\", \"var\"], [\"Y\"],\n epsilon=0.001)\n x_shape = [3, 5, 4, 2]\n param_shape = [5]\n _param_shape = [1, 5, 1, 1]\n x = self._get_rnd_float32(0, 1, shape=x_shape)\n m = self._get_rnd_float32(0, 1, shape=param_shape)\n _m = m.reshape(_param_shape)\n v = self._get_rnd_float32(0, 1, shape=param_shape)\n _v = v.reshape(_param_shape)\n scale = self._get_rnd_float32(0, 1, shape=param_shape)\n _scale = scale.reshape(_param_shape)\n bias = self._get_rnd_float32(0, 1, shape=param_shape)\n _bias = bias.reshape(_param_shape)\n golden = self._batch_normalization(x, _m, _v, _bias, _scale, 
0.001)\n output = run_node(node_def, [x, scale, bias, m, v])\n np.testing.assert_almost_equal(output[\"Y\"], golden, decimal=5)\n\n def test_cast(self):\n if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):\n test_cases = [(\"FLOAT\", tf.float32), (\"UINT8\", tf.uint8),\n (\"INT8\", tf.int8), (\"UINT16\", tf.uint16), (\"INT16\",\n tf.int16),\n (\"INT32\", tf.int32), (\"INT64\", tf.int64), (\"BOOL\", tf.bool),\n (\"FLOAT16\", tf.float16), (\"DOUBLE\", tf.float64),\n (\"COMPLEX64\", tf.complex64), (\"COMPLEX128\", tf.complex128)]\n else:\n test_cases = [(TensorProto.FLOAT,\n tf.float32), (TensorProto.UINT8,\n tf.uint8), (TensorProto.INT8, tf.int8),\n (TensorProto.UINT16,\n tf.uint16), (TensorProto.INT16,\n tf.int16), (TensorProto.INT32, tf.int32),\n (TensorProto.INT64,\n tf.int64), (TensorProto.BOOL,\n tf.bool), (TensorProto.FLOAT16, tf.float16),\n (TensorProto.DOUBLE,\n tf.float64), (TensorProto.COMPLEX64,\n tf.complex64), (TensorProto.COMPLEX128,\n tf.complex128)]\n if not legacy_opset_pre_ver(9):\n test_cases.append((TensorProto.STRING, tf.string))\n for ty, tf_type in test_cases:\n node_def = helper.make_node(\"Cast\", [\"input\"], [\"output\"], to=ty)\n vector = [2, 3]\n output = run_node(node_def, [vector])\n np.testing.assert_equal(output[\"output\"].dtype, tf_type)\n\n if not legacy_opset_pre_ver(9):\n test_cases2 = [(TensorProto.FLOAT, tf.float32), (TensorProto.INT32,\n tf.int32),\n (TensorProto.INT64, tf.int64), (TensorProto.DOUBLE,\n tf.float64)]\n for ty, tf_type in test_cases2:\n node_def = helper.make_node(\"Cast\", [\"input\"], [\"output\"], to=ty)\n vector = ['2', '3']\n output = run_node(node_def, [vector])\n np.testing.assert_equal(output[\"output\"].dtype, tf_type)\n\n def test_ceil(self):\n node_def = helper.make_node(\"Ceil\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.ceil(x))\n\n def test_compress(self):\n if legacy_opset_pre_ver(9):\n 
raise unittest.SkipTest(\n \"ONNX version {} doesn't support Compress.\".format(\n defs.onnx_opset_version()))\n axis = 1\n node_def = helper.make_node(\n \"Compress\", inputs=['X', 'condition'], outputs=['Y'], axis=axis)\n x = self._get_rnd_float32(shape=[5, 5, 5])\n cond = np.array([1, 0, 1])\n output = run_node(node_def, inputs=[x, cond])\n np.testing.assert_almost_equal(output['Y'], np.compress(cond, x, axis=axis))\n\n def test_concat(self):\n shape = [10, 20, 5]\n for axis in range(len(shape)):\n node_def = helper.make_node(\"Concat\", [\"X1\", \"X2\"], [\"Y\"], axis=axis)\n x1 = self._get_rnd_float32(shape=shape)\n x2 = self._get_rnd_float32(shape=shape)\n output = run_node(node_def, [x1, x2])\n np.testing.assert_almost_equal(output[\"Y\"], np.concatenate((x1, x2),\n axis))\n\n def test_constant(self):\n shape = [16, 16]\n values = np.random.randn(*shape).flatten().astype(float)\n const2_onnx = helper.make_tensor(\"const2\", TensorProto.DOUBLE, shape,\n values)\n node_def = helper.make_node(\"Constant\", [], [\"Y\"], value=const2_onnx)\n output = run_node(node_def, [])\n np.testing.assert_equal(output[\"Y\"].shape, shape)\n np.testing.assert_almost_equal(output[\"Y\"].flatten(), values)\n\n # test sparse tensor\n if not legacy_opset_pre_ver(11):\n expected = np.array([[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])\n x = np.array([[0, 0], [1, 2]]).flatten().astype(np.int64)\n values = helper.make_tensor(\"values\", TensorProto.INT32, [2], [1, 2])\n indices = helper.make_tensor(\"indices\", TensorProto.INT64, [2, 2], x)\n a = helper.make_sparse_tensor(values, indices,[3, 4])\n node_def = helper.make_node(\"Constant\", [], [\"Y\"], sparse_value=a)\n output = run_node(node_def, [])\n b = tf.sparse_to_dense(output[\"Y\"].indices, output[\"Y\"].dense_shape, output[\"Y\"].values)\n result = b.eval(session=tf.Session())\n np.testing.assert_equal(result, expected)\n\n def test_constant_fill(self):\n if not legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\n \"ONNX 
version {} doesn't support ConstantFill.\".format(\n defs.onnx_opset_version()))\n shape = [1, 2, 3, 4]\n extra_shape = [5, 6]\n value = 3.\n node_def = helper.make_node(\n \"ConstantFill\",\n [\"X\"],\n [\"Y\"],\n value=value,\n extra_shape=extra_shape,\n dtype=1,\n )\n x = self._get_rnd_float32(shape=shape)\n y = np.zeros(shape + extra_shape)\n y.fill(value)\n output = run_node(node_def, [x])\n np.testing.assert_equal(output[\"Y\"].dtype, tf.float32)\n np.testing.assert_equal(output[\"Y\"], y)\n\n def test_constant_of_shape(self):\n if defs.onnx_opset_version() < 9:\n raise unittest.SkipTest(\n \"ONNX version {} doesn't support ConstantOfShape.\".format(\n defs.onnx_opset_version()))\n v = helper.make_tensor(\"value\", TensorProto.FLOAT, [1], [1])\n node_def = helper.make_node(\"ConstantOfShape\", [\"X\"], [\"Y\"], value=v)\n x = np.array([4, 3, 2])\n output = run_node(node_def, inputs=[x])\n np.testing.assert_almost_equal(output[\"Y\"], np.ones(x, dtype=np.float32))\n v = helper.make_tensor(\"value\", TensorProto.INT32, [1], [0])\n node_def = helper.make_node(\"ConstantOfShape\", [\"X\"], [\"Y\"], value=v)\n x = np.array([10, 6])\n output = run_node(node_def, inputs=[x])\n np.testing.assert_almost_equal(output[\"Y\"], np.zeros(x, dtype=np.int32))\n\n def test_conv(self):\n device = \"CUDA\"\n if not supports_device(device):\n raise unittest.SkipTest(\n \"Backend doesn't support device {}\".format(device))\n\n N, C, H, W = 4, 3, 5, 5\n x_shape = [N, C, H, W]\n K, kH, kW = 6, 3, 3\n weight_shape = [K, C, kH, kW]\n node_def = helper.make_node(\n \"Conv\", [\"X\", \"weights\"], [\"Y\"],\n pads=[1, 1, 1, 1],\n kernel_shape=[kH, kW])\n\n x = self._get_rnd_float32(shape=x_shape)\n weights = self._get_rnd_float32(shape=weight_shape)\n output = run_node(node_def, [x, weights], device=device)\n\n out_shape = [N, K, H, W]\n test_output = np.zeros(out_shape)\n for n in range(N):\n for c in range(C):\n for h in range(H):\n for w in range(W):\n for k in range(K):\n for kh in 
range(kH):\n for kw in range(kW):\n h_in_range = (h - kH // 2 + kh) < H and (\n h - kH // 2 + kh) >= 0\n w_in_range = (w - kW // 2 + kw) < W and (\n w - kW // 2 + kw) >= 0\n if h_in_range and w_in_range:\n test_output[n][k][h][w] += (x[n][c][h - kH // 2 + kh][\n w - kW // 2 + kw] * weights[k][c][kh][kw])\n\n np.testing.assert_almost_equal(output[\"Y\"], test_output, decimal=5)\n\n def test_conv_transpose(self):\n # Fix test in the future.\n return\n device = \"CUDA\"\n if not supports_device(device):\n raise unittest.SkipTest(\n \"Backend doesn't support device {}\".format(device))\n node_def = helper.make_node(\n \"ConvTranspose\", [\"X\", \"weights\"], [\"Y\"], pads=[1, 1])\n x_shape = [1, 5, 4]\n x = self._get_rnd(x_shape)\n weight_shape = [5, 3, 2]\n weights = self._get_rnd_float32(shape=weight_shape)\n output = run_node(node_def, [x, weights], device=device)\n out_shape = [x_shape[0], weight_shape[1], x_shape[2]]\n test_output = np.zeros(out_shape)\n for b in range(0, x_shape[0]):\n for m in range(0, weight_shape[1]):\n for h in range(0, x_shape[2]):\n v = 0\n for c in range(0, x_shape[1]):\n for k in range(h, min(h + weight_shape[2], x_shape[2])):\n v += x[b][c][k] * weights[c][m][k - h]\n test_output[b][m][h] = v\n np.testing.assert_almost_equal(output[\"Y\"], test_output, decimal=5)\n\n def test_cosh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Cosh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Cosh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.cosh(x))\n\n def test_depth_to_space(self):\n node_def = helper.make_node(\"DepthToSpace\", [\"X\"], [\"Y\"], blocksize=2)\n x_shape = [1, 12, 1, 1]\n x = self._get_rnd_float32(shape=x_shape)\n output = run_node(node_def, [x])\n x = np.transpose(x, (0, 2, 3, 1))\n y = np.reshape(np.swapaxes(x.reshape(1, 1, 1, 2, 2, 3), 2, 3), (1, 2, 2, 
3))\n y = np.transpose(y, (0, 3, 1, 2))\n np.testing.assert_almost_equal(output[\"Y\"], y, decimal=5)\n\n def test_dequantize_linear(self):\n node_def = helper.make_node(\"DequantizeLinear\",\n [\"x\", \"x_scale\", \"x_zero_point\"], [\"y\"])\n for x, x_zero_point in [\n [\n self._get_rnd_int(-128, 127, [2, 6], np.int8),\n self._get_rnd_int(-128, 127, dtype=np.int8)\n ],\n [\n self._get_rnd_int(0, 255, [2, 6], np.uint8),\n self._get_rnd_int(0, 255, dtype=np.uint8)\n ],\n [\n self._get_rnd_int(-512, 512, [2, 6]),\n np.int32(0)\n ]\n ]:\n x_scale = self._get_rnd_float32(-10., 10)\n y = np.subtract(np.float32(x), np.float32(x_zero_point))\n y = np.multiply(y, x_scale)\n output = run_node(node_def, [x, x_scale, x_zero_point])\n np.testing.assert_almost_equal(output[\"y\"], y)\n\n def test_div(self):\n node_def = helper.make_node(\"Div\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[10, 10])\n y = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.divide(x, y))\n\n def test_dropout(self):\n # Since current ONNX only support inference and\n # dropout at inference mode is a no-op,\n # therefore dropout is always a no-op operator\n # in ONNX.\n node_def = helper.make_node(\"Dropout\", [\"X\"], [\"Y\"])\n if legacy_opset_pre_ver(7):\n # at inference mode, is_test is always set to 1\n node_def = helper.make_node(\"Dropout\", [\"X\"], [\"Y\"], is_test=1)\n x = self._get_rnd_float32(shape=[3, 4, 5])\n y = x\n output = run_node(node_def, [x])\n np.testing.assert_equal(output[\"Y\"], y)\n\n def test_dot(self):\n # this op is removed\n # remove this test in the future\n return\n node_def = helper.make_node(\"Dot\", [\"X\", \"Y\"], [\"Z\"])\n x = np.floor(self._get_rnd_float32(shape=[10, 10]))\n y = np.floor(self._get_rnd_float32(shape=[10, 10]))\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.dot(x, y))\n\n def test_elu(self):\n node_def = 
helper.make_node(\"Elu\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n output = run_node(node_def, [x])\n test_output = [self._elu(a) for a in x]\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_equal(self):\n node_def = helper.make_node(\"Equal\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 3, 3, 2])\n y = self._get_rnd_float32(shape=[3, 3, 1])\n output = run_node(node_def, [x, y])\n np.testing.assert_equal(output[\"Z\"], np.equal(x, np.reshape(\n y, [1, 3, 3, 1])))\n\n def test_erf(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Erf.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Erf\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n exp_output = np.vectorize(math.erf)(x).astype(np.float32)\n np.testing.assert_almost_equal(output[\"Y\"], exp_output)\n\n def test_exp(self):\n node_def = helper.make_node(\"Exp\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n x = x - 3.6\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.exp(x))\n\n def test_eye_like(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support EyeLike.\".format(\n defs.onnx_opset_version()))\n for shape in [[6, 10], [10, 6]]:\n for off_diagonal_offset in [-10, -6, -3, 0, 3, 6, 7, 10]:\n node_def = helper.make_node(\n \"EyeLike\", ['x'], ['y'], dtype=1, k=off_diagonal_offset)\n x = self._get_rnd_int(0, 100, shape=shape)\n y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)\n output = run_node(node_def, [x])\n np.testing.assert_equal(output['y'], y)\n\n def test_flatten(self):\n # If input tensor has shape (d_0, d_1, ... d_n) then the\n # output will have shape:\n #\n # (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... 
X dn)\n #\n # TODO: pass axis attribute which is supported in newer\n # versions of onnx\n node_def = helper.make_node(\"Flatten\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 2, 3, 4, 5])\n output = run_node(node_def, [x])\n # TODO: pass axis=3 and uncomment the line below\n # np.testing.assert_almost_equal(output[\"Y\"], x.reshape([60, 20]))\n np.testing.assert_almost_equal(output[\"Y\"], x.reshape([10, 120]))\n\n def test_gather(self):\n node_def = helper.make_node(\"Gather\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[10, 10])\n y = [[0, 1], [1, 2]]\n output = run_node(node_def, [x, y])\n test_output = np.zeros((2, 2, 10))\n for i in range(0, 2):\n for j in range(0, 10):\n test_output[0][i][j] = x[i][j]\n for i in range(0, 2):\n for j in range(0, 10):\n test_output[1][i][j] = x[i + 1][j]\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_gemm(self):\n # Compute Y = alpha * A * B + beta * C\n node_def = helper.make_node(\n \"Gemm\", [\"A\", \"B\", \"C\"], [\"Y\"], transA=0, transB=0, alpha=1.0, beta=1.0)\n x = np.floor(self._get_rnd_float32(shape=[10, 10]))\n y = np.floor(self._get_rnd_float32(shape=[10, 10]))\n z = np.floor(self._get_rnd_float32(shape=[10, 10]))\n output = run_node(node_def, [x, y, z])\n test_output = np.matmul(x, y) + z\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_global_average_pool(self):\n # Image case: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n #\n # Non-image case: (N x C x D1 x D2 ... 
Dn)\n #\n # Output data tensor from pooling across the input tensor.\n # Dimensions will be N x C x 1 x 1\n node_def = helper.make_node(\"GlobalAveragePool\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 10, 2, 3])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 1, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n sum = 0\n for j1 in range(0, 2):\n for j2 in range(0, 3):\n sum += x[i1][i2][j1][j2]\n test_output[i1][i2][0][0] = sum / 6.\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_image_sacler(self):\n # Input: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n # Scale: (flout, default 1.0) the scale to apply\n # Bias: applied to each channel, same size as C\n # Output has same shape and type as input\n x = self._get_rnd_float32(shape=[1, 3, 224, 224])\n #random distribution over [0,1), so add 0.1\n scale = np.random.rand(1)[0] + 0.1\n bias = np.random.rand(3)\n node_def = helper.make_node(\n \"ImageScaler\", [\"X\"], [\"Y\"], scale=scale, bias=bias)\n output = run_node(node_def, [x])\n test_out = np.multiply(x, scale)\n test_out = np.transpose(test_out, [0, 2, 3, 1])\n test_out = np.add(test_out, bias)\n test_out = np.transpose(test_out, [0, 3, 1, 2])\n np.testing.assert_almost_equal(output[\"Y\"], test_out)\n\n def test_is_inf(self):\n if legacy_opset_pre_ver(10):\n raise unittest.SkipTest(\"ONNX version {} doesn't support IsInf.\".format(\n defs.onnx_opset_version()))\n input = np.array(\n [-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)\n expected_output = {\n \"node_def\": np.isinf(input),\n \"node_def_neg_false\": np.isposinf(input),\n \"node_def_pos_false\": np.isneginf(input)\n }\n node_defs = {\n \"node_def\":\n helper.make_node(\"IsInf\", [\"X\"], [\"Y\"]),\n \"node_def_neg_false\":\n helper.make_node(\"IsInf\", [\"X\"], [\"Y\"], detect_negative=0),\n \"node_def_pos_false\":\n 
helper.make_node(\"IsInf\", [\"X\"], [\"Y\"], detect_positive=0)\n }\n for key in node_defs:\n output = run_node(node_defs[key], [input])\n np.testing.assert_equal(output[\"Y\"], expected_output[key])\n\n def test_isnan(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support IsNaN.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"IsNaN\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 3])\n x[0][1] = x[1][0] = x[2][2] = np.nan\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.isnan(x))\n\n def test_global_lp_pool(self):\n # Image case: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n #\n # Non-image case: (N x C x D1 x D2 ... Dn)\n #\n # Output data tensor from pooling across the input tensor.\n # Dimensions will be N x C x 1 x 1\n node_def = helper.make_node(\"GlobalLpPool\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 10, 2, 3])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 1, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n tmp = np.zeros([2, 3])\n for j1 in range(0, 2):\n for j2 in range(0, 3):\n tmp[j1][j2] = x[i1][i2][j1][j2]\n test_output[i1][i2][0][0] = np.linalg.norm(tmp)\n np.testing.assert_almost_equal(output[\"Y\"], test_output, decimal=5)\n\n def test_global_max_pool(self):\n # Image case: (N x C x H x W), where N is the batch size,\n # C is the number of channels, and H and W are the height\n # and the width of the data\n #\n # Non-image case: (N x C x D1 x D2 ... 
Dn)\n #\n # Output data tensor from pooling across the input tensor.\n # Dimensions will be N x C x 1 x 1\n node_def = helper.make_node(\"GlobalMaxPool\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[10, 10, 2, 3])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 1, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n max = x[i1][i2][0][0]\n for j1 in range(0, 2):\n for j2 in range(0, 3):\n if max < x[i1][i2][j1][j2]:\n max = x[i1][i2][j1][j2]\n test_output[i1][i2][0][0] = max\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_less(self):\n node_def = helper.make_node(\"Less\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 3, 3, 2])\n y = self._get_rnd_float32(shape=[3, 3, 1])\n output = run_node(node_def, [x, y])\n np.testing.assert_equal(output[\"Z\"], np.less(x, np.reshape(y,\n [1, 3, 3, 1])))\n\n def test_lp_normalization(self):\n for ordr in range(1, 3):\n node_def = helper.make_node(\"LpNormalization\", [\"X\"], [\"Y\"], p=ordr)\n x = self._get_rnd([2, 2, 3, 2])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"],\n x / np.expand_dims(np.linalg.norm(x, axis=-1, ord=ordr), -1),\n rtol=1e-3)\n\n def test_l_r_n(self):\n # Each input value is divided by:\n #\n # (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta\n alpha = 2.0\n beta = 1.0\n bias = 5.0\n size = 3\n node_def = helper.make_node(\n \"LRN\", [\"X\"], [\"Y\"], alpha=alpha, beta=beta, bias=bias, size=size)\n x = self._get_rnd_float32(shape=[10, 2, 10, 10])\n output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 10, 2])\n x = np.transpose(x, axes=[0, 2, 3, 1])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n for j1 in range(0, 10):\n for j2 in range(0, 2):\n sqr_sum = 0.\n # size of 3 means radius 1 in TF speak\n # i.e. 
the immediate neighbouring values\n # if \"previous\" neighbour exists\n if j2 > 0:\n sqr_sum += x[i1][i2][j1][j2 - 1] * x[i1][i2][j1][j2 - 1]\n # current value\n sqr_sum += x[i1][i2][j1][j2] * x[i1][i2][j1][j2]\n # if \"next\" neighbour exists\n if j2 < 2 - 1:\n sqr_sum += x[i1][i2][j1][j2 + 1] * x[i1][i2][j1][j2 + 1]\n test_output[i1][i2][j1][j2] = \\\n x[i1][i2][j1][j2] / ((bias + (alpha * 1. / size) * sqr_sum) ** beta)\n test_output = np.transpose(test_output, axes=[0, 3, 1, 2])\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_floor(self):\n node_def = helper.make_node(\"Floor\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.floor(x))\n\n def test_leakyrelu(self):\n node_def = helper.make_node(\"LeakyRelu\", [\"X\"], [\"Y\"], alpha=0.8)\n x = np.floor(self._get_rnd_float32(shape=[100]))\n output = run_node(node_def, [x])\n test_output = [self._leaky_relu(a, 0.8) for a in x]\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_log(self):\n node_def = helper.make_node(\"Log\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[100])\n x = x + 3.6\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.log(x))\n\n def test_max(self):\n node_def = helper.make_node(\"Max\", [\"X1\", \"X2\", \"X3\", \"X4\"], [\"Z\"])\n x1 = self._get_rnd_float32(shape=[10, 10])\n x2 = self._get_rnd_float32(shape=[10, 10])\n x3 = self._get_rnd_float32(shape=[10, 10])\n x4 = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x1, x2, x3, x4])\n test_output = np.maximum(np.maximum(np.maximum(x1, x2), x3), x4)\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_max_pool(self):\n return\n node_def = helper.make_node(\n \"MaxPool\", [\"X\"], [\"Y\"],\n dilations=[1, 1],\n kernel_shape=[1, 2],\n pads=[0, 0],\n strides=[1, 2])\n x = self._get_rnd_float32(shape=[10, 10, 4, 4])\n 
output = run_node(node_def, [x])\n test_output = np.zeros([10, 10, 4, 2])\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n for j1 in range(0, 4):\n for j2 in range(0, 2):\n test_output[i1][i2][j1][j2] = \\\n max(x[i1][i2][j1][2*j2], x[i1][i2][j1][2*j2 + 1])\n np.testing.assert_almost_equal(output[\"Y\"], test_output)\n\n def test_mean_variance_normalization(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\n \"ONNX version {} doesn't have test for MeanVarianceNormalization\"\n .format(defs.onnx_opset_version()))\n\n input_data = self._get_rnd_float32(shape=[2,2,2,2])\n # Calculate expected output data using formula:\n # (Input - Mean)/SD\n mean = np.mean(input_data, keepdims=1, axis=(0,2,3))\n std = np.std(input_data, keepdims=1, axis=(0,2,3))\n expected_output = (input_data - mean) / std\n # Testing without \"axes\" argument should default to axes=[0,2,3]\n node_def = helper.make_node(\"MeanVarianceNormalization\", [\"X\"], [\"Y\"])\n output = run_node(node_def, [input_data])\n np.testing.assert_almost_equal(output[\"Y\"], expected_output, decimal=5)\n\n def test_min(self):\n node_def = helper.make_node(\"Min\", [\"X1\", \"X2\", \"X3\", \"X4\"], [\"Z\"])\n x1 = self._get_rnd_float32(shape=[10, 10])\n x2 = self._get_rnd_float32(shape=[10, 10])\n x3 = self._get_rnd_float32(shape=[10, 10])\n x4 = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x1, x2, x3, x4])\n test_output = np.minimum(np.minimum(np.minimum(x1, x2), x3), x4)\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_mul(self):\n node_def = helper.make_node(\"Mul\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[5, 10, 5, 5])\n y = self._get_rnd_float32(shape=[10, 1, 1])\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"],\n np.multiply(x, y.reshape([1, 10, 1, 1])))\n\n def test_mod(self):\n if legacy_opset_pre_ver(10):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Mod.\".format(\n 
defs.onnx_opset_version()))\n x = self._get_rnd_float32(shape=[5, 5])\n y = self._get_rnd_float32(shape=[5, 5])\n node_def = helper.make_node(\"Mod\", [\"X\", \"Y\"], [\"Z\"], fmod=0)\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.mod(x, y))\n node_def = helper.make_node(\"Mod\", [\"X\", \"Y\"], [\"Z\"], fmod=1)\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.fmod(x, y))\n\n def test_neg(self):\n node_def = helper.make_node(\"Neg\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.negative(x))\n\n def test_non_zero(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support NonZero.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"NonZero\", [\"x\"], [\"y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n y = np.array(np.nonzero(x))\n output = run_node(node_def, [x])\n np.testing.assert_equal(output[\"y\"], y)\n\n def test_onehot(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support OneHot.\".format(\n defs.onnx_opset_version()))\n indices = np.array([[0, 2], [1, 2], [0, 1]])\n depth = np.int32(5)\n on_value = 6.0\n off_value = 2.0\n values = np.array([off_value, on_value])\n node_def = helper.make_node(\n 'OneHot', inputs=['indices', 'depth', 'values'], outputs=['y'], axis=-1)\n y = (np.arange(depth) == indices[..., None]).astype(int)\n y = y * (on_value - off_value) + off_value\n output = run_node(node_def, inputs=[indices, depth, values])\n np.testing.assert_equal(output['y'], y)\n\n def test_range(self):\n if legacy_opset_pre_ver(11):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Range.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\n \"Range\", ['start', 'limit', 'delta'], ['y'])\n # test positive_delta\n start = self._get_rnd_int(low=0, 
high=3)\n limit = self._get_rnd_int(low=10, high=30)\n delta = np.int32(3)\n output = run_node(node_def, [start, limit, delta])\n np.testing.assert_equal(output['y'], range(start, limit, delta))\n # test negative_delta\n start = self._get_rnd_int(low=20, high=30)\n limit = self._get_rnd_int(low=1, high=5)\n delta = np.int32(-2)\n output = run_node(node_def, [start, limit, delta])\n np.testing.assert_equal(output['y'], range(start, limit, delta))\n\n def test_round(self):\n if legacy_opset_pre_ver(11):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Round.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Round\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(-20.0, 20.0, shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.round(x))\n\n def test_relu(self):\n node_def = helper.make_node(\"Relu\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.maximum(x, 0))\n\n def test_pad(self):\n node_def = helper.make_node(\n \"Pad\", [\"X\"], [\"Y\"], mode=\"constant\", pads=[1, 1, 1, 1], value=2.0)\n x = self._get_rnd_float32(shape=[100, 100])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"],\n np.lib.pad(\n x, ((1, 1), (1, 1)),\n 'constant',\n constant_values=(2, 2)))\n\n def test_quantize_linear(self):\n node_def = helper.make_node(\"QuantizeLinear\",\n [\"x\", \"y_scale\", \"y_zero_point\"], [\"y\"])\n for x in [\n self._get_rnd_float32(-512., 512., [2, 6]),\n self._get_rnd_int(-512, 512, [2, 6])\n ]:\n y_scale = self._get_rnd_float32(-10., 10.)\n for y_zero_point in [\n self._get_rnd_int(-128, 127, dtype=np.int8),\n self._get_rnd_int(0, 255, dtype=np.uint8)\n ]:\n y = np.divide(x, y_scale)\n y = np.round(y)\n y = np.add(y, y_zero_point)\n if y_zero_point.dtype.type is np.int8:\n y = np.clip(y, -128, 127).astype(np.int8)\n else:\n y = np.clip(y, 0, 
255).astype(np.uint8)\n output = run_node(node_def, [x, y_scale, y_zero_point])\n np.testing.assert_almost_equal(output[\"y\"], y)\n\n def test_reciprocal(self):\n node_def = helper.make_node(\"Reciprocal\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], 1.0 / x)\n\n def test_reduce_l1(self):\n node_def = helper.make_node(\"ReduceL1\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"],\n np.linalg.norm(x, 1, (1, 2), True))\n\n def test_reduce_log_sum_exp(self):\n node_def = helper.make_node(\"ReduceLogSumExp\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"],\n np.log(np.sum(np.exp(x), axis=(1, 2), keepdims=True)),\n rtol=1e-3)\n\n def test_reduce_max(self):\n node_def = helper.make_node(\"ReduceMax\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.max(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_mean(self):\n node_def = helper.make_node(\"ReduceMean\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.mean(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_min(self):\n node_def = helper.make_node(\"ReduceMin\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.min(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_prod(self):\n node_def = helper.make_node(\"ReduceProd\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[1, 5, 5, 3])\n output = 
run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.prod(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_sum(self):\n node_def = helper.make_node(\"ReduceSum\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.sum(x, (1, 2), keepdims=True), rtol=1e-3)\n\n def test_reduce_sum_square(self):\n node_def = helper.make_node(\"ReduceSumSquare\", [\"X\"], [\"Y\"], axes=[1, 2])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(\n output[\"Y\"], np.sum(np.square(x), (1, 2), keepdims=True), rtol=1e-3)\n\n def test_pow(self):\n node_def = helper.make_node(\"Pow\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=1000) / 2.0 + 0.5\n y = self._get_rnd_float32(shape=1000) / 2.0 + 0.5\n output = run_node(node_def, [x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.power(x, y))\n\n def test_reshape(self):\n x = self._get_rnd_float32(shape=100)\n shape = [10, 10]\n if defs.onnx_opset_version() < 5:\n node_def = helper.make_node(\"Reshape\", [\"X\"], [\"Z\"], shape=shape)\n output = run_node(node_def, [x])\n else:\n node_def = helper.make_node(\"Reshape\", [\"X\", \"Y\"], [\"Z\"])\n output = run_node(node_def, [x, shape])\n\n np.testing.assert_almost_equal(output[\"Z\"], x.reshape([10, 10]))\n\n def test_reshape_with_copy(self):\n x = self._get_rnd_float32(shape=[10, 20 * 30])\n shape = [0, 20, 30]\n if defs.onnx_opset_version() < 5:\n node_def = helper.make_node(\"Reshape\", [\"X\"], [\"Z\"], shape=shape)\n output = run_node(node_def, [x])\n else:\n node_def = helper.make_node(\"Reshape\", [\"X\", \"Y\"], [\"Z\"])\n output = run_node(node_def, [x, shape])\n\n np.testing.assert_almost_equal(output[\"Z\"], x.reshape([10, 20, 30]))\n\n def test_selu(self):\n node_def = helper.make_node(\"Selu\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n 
output = run_node(node_def, [x])\n alpha = 1.6732\n gamma = 1.0507\n x[x <= 0] = gamma * (alpha * np.exp(x[x <= 0]) - alpha)\n x[x > 0] = gamma * x[x > 0]\n np.testing.assert_allclose(output[\"Y\"], x, rtol=1e-3, atol=1e-7)\n\n def test_shape(self):\n node_def = helper.make_node(\"Shape\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = run_node(node_def, [x])\n np.testing.assert_allclose(output[\"Y\"], np.shape(x))\n\n def test_shrink(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Shrink.\".format(\n defs.onnx_opset_version()))\n\n node_def = helper.make_node(\"Shrink\", [\"X\"], [\"Y\"], bias=1.5, lambd=1.5)\n\n X = np.arange(-2.0, 2.1, dtype=np.float32)\n Y = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)\n output = run_node(node_def, [X])\n np.testing.assert_almost_equal(output[\"Y\"], Y)\n\n def test_sigmoid(self):\n node_def = helper.make_node(\"Sigmoid\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], 1 / (1 + np.exp(-x)))\n\n def test_sign(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Sign.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Sign\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(-10, 10, [3, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.sign(x))\n\n def test_sinh(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Sinh.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Sinh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.sinh(x))\n\n def test_size(self):\n node_def = helper.make_node(\"Size\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[5, 10, 10, 3])\n output = 
run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.size(x))\n\n def test_slice(self):\n # test case 1 with normal inputs\n axes = [0, 1, 2]\n starts = [0, 0, 0]\n ends = [2, 2, 2]\n steps = [1, 1, 1]\n\n if legacy_opset_pre_ver(10):\n node_def = helper.make_node(\n \"Slice\", [\"X\"], [\"S\"], axes=axes, starts=starts, ends=ends)\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"S\"], x[0:2, 0:2, 0:2])\n else:\n node_def = helper.make_node(\n \"Slice\", [\"X\", \"starts\", \"ends\", \"axes\", \"steps\"], [\"S\"])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x, starts, ends, axes, steps])\n np.testing.assert_almost_equal(output[\"S\"], x[0:2, 0:2, 0:2])\n\n # test case 2 with negative, out-of-bound and default inputs\n axes = [0, 2]\n starts = [0, -7]\n ends = [-8, 20]\n\n if legacy_opset_pre_ver(10):\n node_def = helper.make_node(\n \"Slice\", [\"X\"], [\"S\"], axes=axes, starts=starts, ends=ends)\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"S\"], x[0:-8, :, -7:20])\n else:\n node_def = helper.make_node(\n \"Slice\", [\"X\", \"starts\", \"ends\", \"axes\"], [\"S\"])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x, starts, ends, axes])\n np.testing.assert_almost_equal(output[\"S\"], x[0:-8, :, -7:20])\n\n # test case 3 with non-default steps\n axes = [0, 1, 2]\n starts = [0, 0, 0]\n ends = [2, 2, 2]\n steps = [2, -2, -1]\n\n if legacy_opset_pre_ver(10) == False:\n node_def = helper.make_node(\n \"Slice\", [\"X\", \"starts\", \"ends\", \"axes\", \"steps\"], [\"S\"])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x, starts, ends, axes, steps])\n np.testing.assert_almost_equal(output[\"S\"], x[0:2:2, 0:2:-2, 
0:2:-1])\n\n def test_softplus(self):\n node_def = helper.make_node(\"Softplus\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.log(np.exp(x) + 1))\n\n def test_softsign(self):\n node_def = helper.make_node(\"Softsign\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[3, 4, 5])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], x / (1 + np.abs(x)))\n\n def test_space_to_depth(self):\n node_def = helper.make_node(\"SpaceToDepth\", [\"X\"], [\"Y\"], blocksize=2)\n x_shape = [1, 3, 2, 2]\n x = self._get_rnd_float32(shape=x_shape)\n output = run_node(node_def, [x])\n x = np.transpose(x, (0, 2, 3, 1))\n y = np.reshape(\n np.swapaxes(x.reshape(1, 1, 1, 1, 1, 12), 2, 3), (1, 1, 1, 12))\n y = np.transpose(y, (0, 3, 1, 2))\n np.testing.assert_allclose(output[\"Y\"], y, rtol=1e-3)\n\n def test_split(self):\n split = [3, 3, 4]\n node_def = helper.make_node(\n \"Split\", [\"X\"], [\"Z%i\" % i for i in range(len(split))],\n axis=0,\n split=split)\n x = self._get_rnd_float32(shape=[100]).reshape([10, 10])\n\n output = run_node(node_def, [x])\n for a, b in zip(list(output), np.split(x, np.cumsum(split))[:-1]):\n np.testing.assert_almost_equal(a, b)\n\n def test_sqrt(self):\n node_def = helper.make_node(\"Sqrt\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000]) + 1.0\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.sqrt(x), decimal=5)\n\n def test_squeeze(self):\n node_def = helper.make_node(\"Squeeze\", [\"X\"], [\"Y\"], axes=[2])\n x = np.array([[[0], [1], [2]]])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.squeeze(x, axis=2))\n\n def test_sub(self):\n node_def = helper.make_node(\"Sub\", [\"X\", \"Y\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[10, 10])\n y = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x, y])\n 
np.testing.assert_almost_equal(output[\"Z\"], np.subtract(x, y))\n\n def test_sum(self):\n node_def = helper.make_node(\"Sum\", [\"X1\", \"X2\", \"X3\", \"X4\"], [\"Z\"])\n x1 = self._get_rnd_float32(shape=[10, 10])\n x2 = self._get_rnd_float32(shape=[10, 10])\n x3 = self._get_rnd_float32(shape=[10, 10])\n x4 = self._get_rnd_float32(shape=[10, 10])\n output = run_node(node_def, [x1, x2, x3, x4])\n test_output = x1 + x2 + x3 + x4\n np.testing.assert_almost_equal(output[\"Z\"], test_output)\n\n def test_tanh(self):\n node_def = helper.make_node(\"Tanh\", [\"X\"], [\"Y\"])\n x = self._get_rnd_float32(shape=[1000]) + 1.0\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.tanh(x), decimal=5)\n\n def test_thresholded_relu(self):\n alpha = 2.0\n node_def = helper.make_node(\n \"ThresholdedRelu\", [\"X\"], [\"Y\"], alpha=alpha)\n x = self._get_rnd_float32(-3.0, 3.0, [10])\n y = np.clip(x, alpha, np.inf)\n y[y == alpha] = 0\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], y)\n\n def test_tile(self):\n if legacy_onnx_pre_ver(1, 2):\n raise unittest.SkipTest(\n \"The current version of ONNX does not record correctly the opset of Tile.\"\n )\n node_def = helper.make_node(\"Tile\", [\"X1\", \"X2\"], [\"Z\"])\n x = self._get_rnd_float32(shape=[3, 5, 5, 3])\n repeats = [1, 1, 2, 1]\n output = run_node(node_def, [x, repeats])\n np.testing.assert_allclose(output[\"Z\"], np.tile(x, repeats), rtol=1e-3)\n\n def test_transpose(self):\n node_def = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 1])\n x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])\n output = run_node(node_def, [x])\n np.testing.assert_almost_equal(output[\"Y\"], np.transpose(x, (0, 2, 1)))\n\n def test_topk(self):\n x = np.arange(15, dtype=np.float32).reshape(3, 5)\n values = np.array([[4, 3], [9, 8], [14, 13]], dtype=np.float32)\n indices = np.array([[4, 3], [4, 3], [4, 3]], dtype=np.int64)\n if legacy_opset_pre_ver(10): # 
for opset = 1\n node_def = helper.make_node(\"TopK\", [\"x\"], [\"values\", \"indices\"], k=2)\n output = run_node(node_def, [x])\n elif legacy_opset_pre_ver(11): # for opset = 10\n k = np.array([2], dtype=np.int64)\n node_def = helper.make_node(\"TopK\", [\"x\", \"k\"], [\"values\", \"indices\"])\n output = run_node(node_def, [x, k])\n else: # for opset = 11\n x = np.array([[3, 2, 5, 10, 7], [12, 15, 10, 7, 20], [21, 16, 5, 3, 6]],\n dtype=np.float32)\n values = np.array([[3, 2], [10, 7], [5, 3]], dtype=np.float32)\n indices = np.array([[0, 1], [2, 3], [2, 3]], dtype=np.int64)\n k = np.array([2], dtype=np.int64)\n node_def = helper.make_node(\n \"TopK\", [\"x\", \"k\"], [\"values\", \"indices\"], largest=0, sorted=0)\n output = run_node(node_def, [x, k])\n np.testing.assert_almost_equal(output[\"values\"], values)\n np.testing.assert_almost_equal(output[\"indices\"], indices)\n\n def test_where(self):\n if legacy_opset_pre_ver(9):\n raise unittest.SkipTest(\"ONNX version {} doesn't support Where.\".format(\n defs.onnx_opset_version()))\n node_def = helper.make_node(\"Where\", [\"C\", \"X\", \"Y\"], [\"Z\"])\n c = np.array([[1, 0], [1, 1]], dtype=np.bool)\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[9, 8], [7, 6]], dtype=np.float32)\n output = run_node(node_def, [c, x, y])\n np.testing.assert_almost_equal(output[\"Z\"], np.where(c, x, y))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.dot",
"numpy.arctanh",
"numpy.sqrt",
"numpy.minimum",
"numpy.squeeze",
"numpy.cumsum",
"numpy.isneginf",
"numpy.round",
"numpy.max",
"numpy.concatenate",
"numpy.mean",
"numpy.argmin",
"numpy.random.randn",
"numpy.negative",
"numpy.exp",
"numpy.where",
"numpy.divide",
"numpy.random.randint",
"numpy.square",
"numpy.testing.assert_equal",
"numpy.fmod",
"tensorflow.sparse_to_dense",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.matmul",
"numpy.subtract",
"numpy.testing.assert_almost_equal",
"numpy.std",
"numpy.ceil",
"numpy.size",
"numpy.argmax",
"numpy.float32",
"tensorflow.Session",
"numpy.zeros",
"numpy.lib.pad",
"numpy.log",
"numpy.cosh",
"numpy.multiply",
"numpy.nonzero",
"numpy.isnan",
"numpy.min",
"numpy.arccosh",
"numpy.power",
"numpy.random.rand",
"numpy.floor",
"numpy.transpose",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.arcsinh",
"numpy.sum",
"numpy.tanh",
"numpy.maximum",
"numpy.abs",
"numpy.int32",
"numpy.isposinf",
"numpy.compress",
"numpy.expm1",
"numpy.ones",
"numpy.linalg.norm",
"numpy.random.uniform",
"numpy.sign",
"numpy.sinh",
"numpy.shape",
"numpy.tile",
"numpy.prod",
"numpy.vectorize",
"numpy.mod",
"numpy.add",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Martin36/tapas | [
"2987658c3b65c5ab6e698d6c57823dc30d3d0f96"
] | [
"tapas/models/tapas_classifier_model_utils.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"TAPAS BERT model utils for classification.\"\"\"\n\nfrom typing import Dict, Text, Tuple, Optional\nfrom tapas.models import segmented_tensor\nimport tensorflow.compat.v1 as tf\n\nEPSILON_ZERO_DIVISION = 1e-10\nCLOSE_ENOUGH_TO_LOG_ZERO = -10000.0\n\n\ndef classification_initializer():\n \"\"\"Classification layer initializer.\"\"\"\n return tf.truncated_normal_initializer(stddev=0.02)\n\n\ndef extract_answer_from_features(\n features, use_answer_as_supervision\n):\n \"\"\"Extracts the answer, numeric_values, numeric_values_scale.\"\"\"\n if use_answer_as_supervision:\n answer = tf.squeeze(features[\"answer\"], axis=[1])\n numeric_values = features[\"numeric_values\"]\n numeric_values_scale = features[\"numeric_values_scale\"]\n else:\n answer = None\n numeric_values = None\n numeric_values_scale = None\n return answer, numeric_values, numeric_values_scale\n\n\ndef compute_token_logits(output_layer, temperature,\n init_cell_selection_weights_to_zero):\n \"\"\"Computes logits per token.\n\n Args:\n output_layer: <float>[batch_size, seq_length, hidden_dim] Output of the\n encoder layer.\n temperature: float Temperature for the Bernoulli distribution.\n init_cell_selection_weights_to_zero: Whether the initial weights should be\n set to 0. 
This ensures that all tokens have the same prior probability.\n\n Returns:\n <float>[batch_size, seq_length] Logits per token.\n \"\"\"\n hidden_size = output_layer.shape.as_list()[-1]\n output_weights = tf.get_variable(\n \"output_weights\", [hidden_size],\n initializer=tf.zeros_initializer()\n if init_cell_selection_weights_to_zero else classification_initializer())\n output_bias = tf.get_variable(\n \"output_bias\", shape=(), initializer=tf.zeros_initializer())\n logits = (tf.einsum(\"bsj,j->bs\", output_layer, output_weights) +\n output_bias) / temperature\n return logits\n\n\n# TODO(eisenjulian): Move more methods from tapas_classifier_model\ndef compute_column_logits(output_layer,\n cell_index,\n cell_mask,\n init_cell_selection_weights_to_zero,\n allow_empty_column_selection):\n \"\"\"Computes logits for each column.\n\n Args:\n output_layer: <float>[batch_size, seq_length, hidden_dim] Output of the\n encoder layer.\n cell_index: segmented_tensor.IndexMap [batch_size, seq_length] Index that\n groups tokens into cells.\n cell_mask: <float>[batch_size, max_num_rows * max_num_cols] Input mask per\n cell, 1 for cells that exists in the example and 0 for padding.\n init_cell_selection_weights_to_zero: Whether the initial weights should be\n set to 0. This is also applied to column logits, as they are used to\n select the cells. This ensures that all columns have the same prior\n probability.\n allow_empty_column_selection: Allow to select no column.\n\n Returns:\n <float>[batch_size, max_num_cols] Logits per column. 
Logits will be set to\n a very low value (such that the probability is 0) for the special id 0\n (which means \"outside the table\") or columns that do not apear in the\n table.\n \"\"\"\n hidden_size = output_layer.shape.as_list()[-1]\n column_output_weights = tf.get_variable(\n \"column_output_weights\", [hidden_size],\n initializer=tf.zeros_initializer()\n if init_cell_selection_weights_to_zero else classification_initializer())\n column_output_bias = tf.get_variable(\n \"column_output_bias\", shape=(), initializer=tf.zeros_initializer())\n token_logits = (\n tf.einsum(\"bsj,j->bs\", output_layer, column_output_weights) +\n column_output_bias)\n\n # Average the logits per cell and then per column.\n # Note that by linearity it doesn't matter if we do the averaging on the\n # embeddings or on the logits. For performance we do the projection first.\n # [batch_size, max_num_cols * max_num_rows]\n cell_logits, cell_logits_index = segmented_tensor.reduce_mean(\n token_logits, cell_index)\n\n column_index = cell_index.project_inner(cell_logits_index)\n # [batch_size, max_num_cols]\n column_logits, out_index = segmented_tensor.reduce_sum(\n cell_logits * cell_mask, column_index)\n cell_count, _ = segmented_tensor.reduce_sum(cell_mask, column_index)\n column_logits /= cell_count + EPSILON_ZERO_DIVISION\n\n # Mask columns that do not appear in the example.\n is_padding = tf.logical_and(cell_count < 0.5,\n tf.not_equal(out_index.indices, 0))\n column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32)\n\n if not allow_empty_column_selection:\n column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(\n tf.equal(out_index.indices, 0), tf.float32)\n\n return column_logits\n"
] | [
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.truncated_normal_initializer",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.einsum",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suhongkim/SSD-Vehicle-Detector | [
"8337d237f4c7923e55d02747ec37a60681e5beff"
] | [
"vehicle_detection.py"
] | [
"import os\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom cityscape_dataset import CityScapeDataset\r\nfrom ssd_util import load_dataset_list, load_dataset_list_original, show_loss, show_log\r\nfrom ssd_net import SSD\r\nfrom ssd_train import train_net\r\nfrom ssd_test import test_net\r\n\r\n\r\nif __name__ == '__main__':\r\n # Define Label Group\r\n dataset_label_group = {\r\n 'background': [],\r\n 'sm_veh': ['motorcycle', 'motorcyclegroup', 'bicycle', 'bicyclegroup'],\r\n 'med_veh': ['car', 'cargroup'],\r\n # 'ego_veh': ['ego vehicle'],\r\n 'big_veh': ['bus', 'trailer', 'truck'],\r\n # 'people': ['person', 'persongroup'],\r\n # 'riders': ['rider', 'ridergroup']\r\n }\r\n\r\n # Define Configurations\r\n config = {'is_gpu': True,\r\n 'debug': False,\r\n 'n_aug': 1,\r\n 'n_batch': 64,\r\n 'n_worker': 4,\r\n 'lr': 0.001,\r\n 'max_epoch': 100,\r\n 'save_epochs': [10,20,30,40,50,60,70,80,90],\r\n 'is_lr_scheduled': False,\r\n # 'class_labels': ['background', 'cargroup'],\r\n # 'class_labels': ['background', 'persongroup', 'person', 'cargroup', 'car'],\r\n 'label_groups': dataset_label_group,\r\n 'class_labels': list(dataset_label_group.keys()),\r\n 'is_train': True,\r\n 'is_test': True,\r\n 'results_path': '/home/suhongk/sfuhome/CMPT742/Lab3/vehicle_detection_v2/results/SSD__28th_16:47_best_model.pth'\r\n }\r\n\r\n # crop original image\r\n # person + persongroup , car+Cargroup\r\n # Overfitted data for the unaug\r\n # check training set\r\n\r\n # Default Cuda Setting -------------------------------------------------\r\n from torch.multiprocessing import Pool, Process, set_start_method\r\n try:\r\n set_start_method('spawn')\r\n except RuntimeError:\r\n pass\r\n\r\n if torch.cuda.is_available():\r\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\r\n torch.backends.cudnn.benchmark = True\r\n\r\n # load dataset_list -------------------------------------------------\r\n if config['is_gpu']:\r\n sample_path = 
'/home/datasets/full_dataset/train_extra/'\r\n label_path = '/home/datasets/full_dataset_labels/train_extra'\r\n else:\r\n sample_path = '../cityscapes_samples/'\r\n label_path = '../cityscapes_samples_labels/'\r\n\r\n dataset_list = load_dataset_list(sample_path, label_path, config['label_groups'])\r\n # dataset_list = load_dataset_list_original(sample_path, label_path, config['class_labels'])\r\n # Define dataset/dataloader -------------------------------------------\r\n num_train = int(0.3 * len(dataset_list))\r\n num_valid = int(0.1 * len(dataset_list))\r\n if config['is_train']:\r\n train_dataset = CityScapeDataset(dataset_list[:num_train], n_augmented=config['n_aug'], debug=config['debug'])\r\n train_loader = DataLoader(train_dataset, batch_size=config['n_batch'], shuffle=True, num_workers=config['n_worker'])\r\n print('Total training items: ', len(train_dataset))\r\n print('Total training batches size in one epoch: ', len(train_loader))\r\n\r\n valid_dataset = CityScapeDataset(dataset_list[num_train:(num_train + num_valid)], debug=config['debug'])\r\n valid_loader = DataLoader(valid_dataset, batch_size=config['n_batch'], shuffle=True, num_workers=config['n_worker'])\r\n print('Total validating items: ', len(valid_dataset))\r\n print('Total validating batches size in one epoch: ', len(valid_loader))\r\n\r\n if config['is_test']:\r\n test_dataset = CityScapeDataset(dataset_list[(num_train + num_valid):], debug=config['debug'])\r\n print('Total testing items: ', len(test_dataset))\r\n\r\n # Train network -----------------------------------------------------\r\n if config['is_train']:\r\n lab_results_dir = \"./results/\" # for the results\r\n results_path = train_net(train_loader, valid_loader, config['class_labels'], lab_results_dir,\r\n learning_rate=config['lr'], is_lr_scheduled=config['is_lr_scheduled'],\r\n max_epoch=config['max_epoch'], save_epochs=config['save_epochs'])\r\n print('\\n\\n-----------------------\\n\\tresult_path:', results_path)\r\n if not 
config['is_gpu']:\r\n show_loss(results_path + '.loss')\r\n # show_log(results_path + '__train.log')\r\n # show_log(results_path + '__valid.log')\r\n if config['is_test']:\r\n test_net(test_dataset, config['class_labels'], (results_path + '__model.pth'))\r\n # Train network -----------------------------------------------------\r\n if config['is_test'] and not config['is_train']:\r\n test_net(test_dataset, config['class_labels'], config['results_path'])\r\n # pass\r\n # Test Code ----------------------------------------------------------\r\n # idx, (imgs, bbox_label, bbox_indices, _) = next(enumerate(train_loader))\r\n # print(bbox_indices)\r\n # test_dataset.__getitem__(9)\r\n # net = SSD(len(class_labels))\r\n # net.cuda()\r\n # net.forward(torch.rand(1, 3, 300, 300))\r\n\r\n\r\n\r\n\r\n"
] | [
[
"torch.multiprocessing.set_start_method",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.set_default_tensor_type"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DavidLesnjak/CMSIS_5 | [
"e0848410d137758a3356a5ee94ca4501cea708a8",
"e0848410d137758a3356a5ee94ca4501cea708a8",
"e0848410d137758a3356a5ee94ca4501cea708a8"
] | [
"CMSIS/DSP/Examples/ARM/arm_bayes_example/train.py",
"CMSIS/DSP/Testing/PatternGeneration/Stats.py",
"CMSIS/DSP/Testing/addToRegDB.py"
] | [
"from sklearn.naive_bayes import GaussianNB\nimport random\nimport numpy as np\nimport math\n\nfrom pylab import scatter,figure, clf, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show,semilogx, semilogy\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\n\n# Generation of data to train the classifier\n# 100 vectors are generated. Vector have dimension 2 so can be represented as points\nNBVECS = 100\nVECDIM = 2\n\n# 3 cluster of points are generated\nballRadius = 1.0\nx1 = [1.5, 1] + ballRadius * np.random.randn(NBVECS,VECDIM)\nx2 = [-1.5, 1] + ballRadius * np.random.randn(NBVECS,VECDIM)\nx3 = [0, -3] + ballRadius * np.random.randn(NBVECS,VECDIM)\n\n# All points are concatenated\nX_train=np.concatenate((x1,x2,x3))\n\n# The classes are 0,1 and 2.\nY_train=np.concatenate((np.zeros(NBVECS),np.ones(NBVECS),2*np.ones(NBVECS)))\n\ngnb = GaussianNB()\ngnb.fit(X_train, Y_train)\n\nprint(\"Testing\")\ny_pred = gnb.predict([[1.5,1.0]])\nprint(y_pred)\n\ny_pred = gnb.predict([[-1.5,1.0]])\nprint(y_pred)\n\ny_pred = gnb.predict([[0,-3.0]])\nprint(y_pred)\n\n# Dump of data for CMSIS-DSP\n\nprint(\"Parameters\")\n# Gaussian averages\nprint(\"Theta = \",list(np.reshape(gnb.theta_,np.size(gnb.theta_))))\n\n# Gaussian variances\nprint(\"Sigma = \",list(np.reshape(gnb.sigma_,np.size(gnb.sigma_))))\n\n# Class priors\nprint(\"Prior = \",list(np.reshape(gnb.class_prior_,np.size(gnb.class_prior_))))\n\nprint(\"Epsilon = \",gnb.epsilon_)\n\n\n# Some bounds are computed for the graphical representation\nx_min = X_train[:, 0].min()\nx_max = X_train[:, 0].max()\ny_min = X_train[:, 1].min()\ny_max = X_train[:, 1].max()\n\nfont = FontProperties()\nfont.set_size(20)\n\nr=plt.figure()\nplt.axis('off')\nplt.text(1.5,1.0,\"A\", verticalalignment='center', horizontalalignment='center',fontproperties=font)\nplt.text(-1.5,1.0,\"B\",verticalalignment='center', horizontalalignment='center', fontproperties=font)\nplt.text(0,-3,\"C\", verticalalignment='center', 
horizontalalignment='center',fontproperties=font)\nscatter(x1[:,0],x1[:,1],s=1.0,color='#FF6B00')\nscatter(x2[:,0],x2[:,1],s=1.0,color='#95D600')\nscatter(x3[:,0],x3[:,1],s=1.0,color='#00C1DE')\n#r.savefig('fig.jpeg')\n#plt.close(r)\nshow()",
"import os.path\nimport itertools\nimport Tools\nimport random\nimport numpy as np\nimport scipy\nimport scipy.stats\nimport math\n\nNBTESTS = 10\nVECDIM = [12,14,20]\n\ndef entropyTest(config,nb):\n DIMS = [3,8,9,12]\n inputs = [] \n outputs = [] \n dims=[NBTESTS]\n for i in range(0,NBTESTS):\n vecDim = DIMS[i % len(DIMS)]\n dims.append(vecDim)\n v = np.random.rand(vecDim)\n v = v / np.sum(v)\n e = scipy.stats.entropy(v)\n inputs += list(v)\n outputs.append(e)\n inputs = np.array(inputs)\n outputs = np.array(outputs)\n dims = np.array(dims)\n config.writeInput(nb, inputs,\"Input\")\n config.writeInputS16(nb, dims,\"Dims\")\n config.writeReference(nb, outputs,\"RefEntropy\")\n\ndef logsumexpTest(config,nb):\n DIMS = [3,8,9,12]\n inputs = [] \n outputs = [] \n dims=[NBTESTS]\n for i in range(0,NBTESTS):\n vecDim = DIMS[i % len(DIMS)]\n dims.append(vecDim)\n v = np.random.rand(vecDim)\n v = v / np.sum(v)\n e = scipy.special.logsumexp(v)\n inputs += list(v)\n outputs.append(e)\n inputs = np.array(inputs)\n outputs = np.array(outputs)\n dims = np.array(dims)\n config.writeInput(nb, inputs,\"Input\")\n config.writeInputS16(nb, dims,\"Dims\")\n config.writeReference(nb, outputs,\"RefLogSumExp\")\n\ndef klTest(config,nb):\n DIMS = [3,8,9,12]\n inputsA = [] \n inputsB = [] \n outputs = [] \n vecDim = VECDIM[nb % len(VECDIM)]\n dims=[NBTESTS]\n for i in range(0,NBTESTS):\n vecDim = DIMS[i % len(DIMS)]\n dims.append(vecDim)\n va = np.random.rand(vecDim)\n va = va / np.sum(va)\n\n vb = np.random.rand(vecDim)\n vb = vb / np.sum(vb)\n\n e = scipy.stats.entropy(va,vb)\n inputsA += list(va)\n inputsB += list(vb)\n outputs.append(e)\n inputsA = np.array(inputsA)\n inputsB = np.array(inputsB)\n outputs = np.array(outputs)\n dims = np.array(dims)\n config.writeInput(nb, inputsA,\"InputA\")\n config.writeInput(nb, inputsB,\"InputB\")\n config.writeInputS16(nb, dims,\"Dims\")\n config.writeReference(nb, outputs,\"RefKL\")\n\ndef logSumExpDotTest(config,nb):\n DIMS = [3,8,9,12]\n 
inputsA = [] \n inputsB = [] \n outputs = [] \n vecDim = VECDIM[nb % len(VECDIM)]\n dims=[NBTESTS]\n for i in range(0,NBTESTS):\n vecDim = DIMS[i % len(DIMS)]\n dims.append(vecDim)\n va = np.random.rand(vecDim)\n va = va / np.sum(va)\n\n vb = np.random.rand(vecDim)\n vb = vb / np.sum(vb)\n\n d = 0.001\n # It is a proba so must be in [0,1]\n # But restricted to ]d,1] so that the log exists\n va = (1-d)*va + d\n vb = (1-d)*vb + d\n e = np.log(np.dot(va,vb))\n va = np.log(va)\n vb = np.log(vb)\n\n inputsA += list(va)\n inputsB += list(vb)\n outputs.append(e)\n inputsA = np.array(inputsA)\n inputsB = np.array(inputsB)\n outputs = np.array(outputs)\n dims = np.array(dims)\n config.writeInput(nb, inputsA,\"InputA\")\n config.writeInput(nb, inputsB,\"InputB\")\n config.writeInputS16(nb, dims,\"Dims\")\n config.writeReference(nb, outputs,\"RefLogSumExpDot\")\n\ndef writeF16OnlyTests(config,nb):\n entropyTest(config,nb)\n logsumexpTest(config,nb+1)\n klTest(config,nb+2)\n logSumExpDotTest(config,nb+3)\n return(nb+4)\n\ndef writeF32OnlyTests(config,nb):\n entropyTest(config,nb)\n logsumexpTest(config,nb+1)\n klTest(config,nb+2)\n logSumExpDotTest(config,nb+3)\n return(nb+4)\n\ndef writeF64OnlyTests(config,nb):\n entropyTest(config,nb)\n logsumexpTest(config,nb+1)\n klTest(config,nb+2)\n logSumExpDotTest(config,nb+3)\n return(nb+4)\n\n# For index in min and max we need to ensure that the difference between values\n# of the input is big enough to be representable on q31, q15 or q7.\n# Otherwise python will compute an index different from the one\n# computed by CMSIS which is normal but then the CMSIS test will fail.\n\n#vfunc = np.vectorize(squarer)\n\ndef floatRound(x,f):\n return(np.round(x * 2**f)/2**f)\n\n# Min / Max tests\ndef generateMaxTests(config,nb,format,data):\n\n \n indexes=[]\n maxvals=[]\n\n nbiters = Tools.loopnb(format,Tools.TAILONLY)\n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = 
Tools.loopnb(format,Tools.BODYONLY)\n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYANDTAIL)\n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n if format == 7:\n # Force max at position 280\n \n nbiters = 280\n \n data = np.zeros(nbiters)\n \n data[nbiters-1] = 0.9 \n data[nbiters-2] = 0.8 \n \n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n \n indexes.append(index)\n maxvals.append(maxvalue)\n\n config.writeInput(nb, data,\"InputMaxIndexMax\")\n\n config.writeReference(nb, maxvals,\"MaxVals\")\n config.writeInputS16(nb, indexes,\"MaxIndexes\")\n return(nb+1)\n\ndef generateMinTests(config,nb,format,data):\n\n \n indexes=[]\n maxvals=[]\n\n nbiters = Tools.loopnb(format,Tools.TAILONLY)\n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYONLY)\n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYANDTAIL)\n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n if format == 7:\n # Force max at position 280\n nbiters = 280\n \n data = 0.9*np.ones(nbiters)\n \n data[nbiters-1] = 0.0 \n data[nbiters-2] = 0.1 \n \n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n \n indexes.append(index)\n maxvals.append(maxvalue)\n \n \n config.writeInput(nb, data,\"InputMinIndexMax\")\n config.writeReference(nb, maxvals,\"MinVals\")\n config.writeInputS16(nb, indexes,\"MinIndexes\")\n return(nb+1)\n\n# Min/Max Abs Tests\ndef generateMaxAbsTests(config,nb,format,data):\n data = np.abs(data)\n \n indexes=[]\n maxvals=[]\n\n nbiters = Tools.loopnb(format,Tools.TAILONLY)\n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n\n 
indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYONLY)\n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYANDTAIL)\n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n if format == 7:\n # Force max at position 280\n \n nbiters = 280\n \n data = np.zeros(nbiters)\n \n data[nbiters-1] = 0.9 \n data[nbiters-2] = 0.8 \n \n index=np.argmax(data[0:nbiters])\n maxvalue=data[index]\n \n indexes.append(index)\n maxvals.append(maxvalue)\n\n config.writeInput(nb, data,\"InputAbsMaxIndexMax\")\n\n config.writeReference(nb, maxvals,\"AbsMaxVals\")\n config.writeInputS16(nb, indexes,\"AbsMaxIndexes\")\n return(nb+1)\n\ndef generateMinAbsTests(config,nb,format,data):\n data = np.abs(data)\n \n indexes=[]\n maxvals=[]\n\n nbiters = Tools.loopnb(format,Tools.TAILONLY)\n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYONLY)\n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYANDTAIL)\n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n\n indexes.append(index)\n maxvals.append(maxvalue)\n\n if format == 7:\n # Force max at position 280\n nbiters = 280\n \n data = 0.9*np.ones(nbiters)\n \n data[nbiters-1] = 0.0 \n data[nbiters-2] = 0.1 \n \n index=np.argmin(data[0:nbiters])\n maxvalue=data[index]\n \n indexes.append(index)\n maxvals.append(maxvalue)\n \n \n config.writeInput(nb, data,\"InputAbsMinIndexMax\")\n config.writeReference(nb, maxvals,\"AbsMinVals\")\n config.writeInputS16(nb, indexes,\"AbsMinIndexes\")\n return(nb+1)\n\ndef averageTest(format,data):\n return(np.average(data))\n\ndef powerTest(format,data):\n if format == 31:\n return(np.dot(data,data) / 
2**15) # CMSIS is 2.28 format\n elif format == 15:\n return(np.dot(data,data) / 2**33) # CMSIS is 34.30 format\n elif format == 7:\n return(np.dot(data,data) / 2**17) # CMSIS is 18.14 format\n else:\n return(np.dot(data,data))\n\ndef rmsTest(format,data):\n return(math.sqrt(np.dot(data,data)/data.size))\n\ndef stdTest(format,data):\n return(np.std(data,ddof=1))\n\ndef varTest(format,data):\n return(np.var(data,ddof=1))\n\ndef generateFuncTests(config,nb,format,data,func,name):\n\n funcvals=[]\n\n nbiters = Tools.loopnb(format,Tools.TAILONLY)\n funcvalue=func(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYONLY)\n funcvalue=func(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYANDTAIL)\n funcvalue=func(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n nbiters = 100\n funcvalue=func(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n config.writeReference(nb, funcvals,name)\n return(nb+1)\n\ndef generatePowerTests(config,nb,format,data):\n\n funcvals=[]\n\n nbiters = Tools.loopnb(format,Tools.TAILONLY)\n funcvalue=powerTest(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYONLY)\n funcvalue=powerTest(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n nbiters = Tools.loopnb(format,Tools.BODYANDTAIL)\n funcvalue=powerTest(format,data[0:nbiters])\n funcvals.append(funcvalue)\n\n if format==31 or format==15:\n config.writeReferenceQ63(nb, funcvals,\"PowerVals\")\n elif format==7:\n config.writeReferenceQ31(nb, funcvals,\"PowerVals\")\n else:\n config.writeReference(nb, funcvals,\"PowerVals\")\n return(nb+1)\n\ndef writeTests(config,nb,format):\n NBSAMPLES = 300\n data1=np.random.randn(NBSAMPLES)\n data2=np.random.randn(NBSAMPLES)\n \n data1 = Tools.normalize(data1)\n data2 = np.abs(data1)\n\n # Force quantization so that computation of indexes\n # in min/max is coherent between Python and CMSIS.\n # Otherwise 
there will be normal differences and the test\n # will be displayed as failed.\n if format==31:\n data1=floatRound(data1,31)\n\n if format==15:\n data1=floatRound(data1,15)\n\n if format==7:\n data1=floatRound(data1,7)\n\n config.writeInput(1, data1,\"Input\")\n config.writeInput(2, data2,\"Input\")\n\n nb=generateMaxTests(config,nb,format,data1)\n nb=generateFuncTests(config,nb,format,data2,averageTest,\"MeanVals\")\n nb=generateMinTests(config,nb,format,data1)\n nb=generatePowerTests(config,nb,format,data1)\n nb=generateFuncTests(config,nb,format,data1,rmsTest,\"RmsVals\")\n nb=generateFuncTests(config,nb,format,data1,stdTest,\"StdVals\")\n nb=generateFuncTests(config,nb,format,data1,varTest,\"VarVals\")\n return(nb)\n\n# We don't want to change ID number of existing tests.\n# So new tests have to be added after existing ones\ndef writeNewsTests(config,nb,format):\n NBSAMPLES = 300\n data1=np.random.randn(NBSAMPLES)\n \n data1 = Tools.normalize(data1)\n\n config.writeInput(1, data1,\"InputNew\")\n nb=generateMaxAbsTests(config,nb,format,data1)\n nb=generateMinAbsTests(config,nb,format,data1)\n\n\ndef generateBenchmark(config,format):\n NBSAMPLES = 256\n data1=np.random.randn(NBSAMPLES)\n data2=np.random.randn(NBSAMPLES)\n \n data1 = Tools.normalize(data1)\n data2 = np.abs(data1)\n\n if format==31:\n data1=floatRound(data1,31)\n\n if format==15:\n data1=floatRound(data1,15)\n\n if format==7:\n data1=floatRound(data1,7)\n\n config.writeInput(1, data1,\"InputBench\")\n config.writeInput(2, data2,\"InputBench\")\n\n\ndef generatePatterns():\n PATTERNDIR = os.path.join(\"Patterns\",\"DSP\",\"Stats\",\"Stats\")\n PARAMDIR = os.path.join(\"Parameters\",\"DSP\",\"Stats\",\"Stats\")\n \n configf64=Tools.Config(PATTERNDIR,PARAMDIR,\"f64\")\n configf32=Tools.Config(PATTERNDIR,PARAMDIR,\"f32\")\n configf16=Tools.Config(PATTERNDIR,PARAMDIR,\"f16\")\n configq31=Tools.Config(PATTERNDIR,PARAMDIR,\"q31\")\n configq15=Tools.Config(PATTERNDIR,PARAMDIR,\"q15\")\n configq7 
=Tools.Config(PATTERNDIR,PARAMDIR,\"q7\")\n \n configf32.setOverwrite(False)\n configf16.setOverwrite(False)\n configq31.setOverwrite(False)\n configq15.setOverwrite(False)\n configq7.setOverwrite(False)\n\n #nb=writeTests(configf32,1,0)\n #nb=writeF32OnlyTests(configf32,22)\n #writeNewsTests(configf32,nb,Tools.F32)\n\n nb=writeTests(configf64,1,Tools.F64)\n nb=writeF64OnlyTests(configf64,22)\n writeNewsTests(configf64,nb,Tools.F64)\n\n #nb=writeTests(configq31,1,31)\n #writeNewsTests(configq31,nb,Tools.Q31)\n#\n #nb=writeTests(configq15,1,15)\n #writeNewsTests(configq15,nb,Tools.Q15)\n#\n #nb=writeTests(configq7,1,7)\n #writeNewsTests(configq7,nb,Tools.Q7)\n#\n #nb=writeTests(configf16,1,16)\n #nb=writeF16OnlyTests(configf16,22)\n #writeNewsTests(configf16,nb,Tools.F16)\n\n generateBenchmark(configf64, Tools.F64)\n generateBenchmark(configf32, Tools.F32)\n generateBenchmark(configf16, Tools.F16)\n generateBenchmark(configq31, Tools.Q31)\n generateBenchmark(configq15, Tools.Q15)\n generateBenchmark(configq7, Tools.Q7)\n\nif __name__ == '__main__':\n generatePatterns()\n",
"# Process the test results\n# Test status (like passed, or failed with error code)\n\nimport argparse\nimport re \nimport TestScripts.NewParser as parse\nimport TestScripts.CodeGen\nfrom collections import deque\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport csv\nimport TestScripts.Deprecate as d\nimport sqlite3\nimport datetime, time\nimport re \n\n# For sql table creation\nMKSTRFIELD=['Regression']\nMKBOOLFIELD=['HARDFP', 'FASTMATH', 'NEON', 'HELIUM','UNROLL', 'ROUNDING','OPTIMIZED']\nMKINTFIELD=['ID','MAX']\nMKREALFIELD=['MAXREGCOEF']\nMKDATEFIELD=[]\nMKKEYFIELD=['DATE','NAME','CATEGORY', 'PLATFORM', 'CORE', 'COMPILER','TYPE','RUN']\nMKKEYFIELDID={'CATEGORY':'categoryid', \n 'NAME':'testnameid',\n 'DATE':'testdateid',\n 'PLATFORM':'platformid', \n 'CORE':'coreid', \n 'COMPILER':'compilerid',\n 'TYPE':'typeid',\n 'RUN':'runid'}\n\n# For csv table value extraction\nVALSTRFIELD=['TESTNAME','VERSION','Regression']\nVALBOOLFIELD=['HARDFP', 'FASTMATH', 'NEON', 'HELIUM','UNROLL', 'ROUNDING','OPTIMIZED']\nVALINTFIELD=['ID', 'MAX']\nVALREALFIELD=['MAXREGCOEF']\nVALDATEFIELD=[]\n# Some of those fields may be created by the parsing of other fields\nVALKEYFIELD=['DATE','NAME','CATEGORY', 'PLATFORM', 'CORE', 'COMPILER','TYPE']\n\ndef joinit(iterable, delimiter):\n it = iter(iterable)\n yield next(it)\n for x in it:\n yield delimiter\n yield x\n\ndef tableExists(c,tableName):\n req=(tableName,)\n r=c.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name=?\",req)\n return(r.fetchone() != None)\n\ndef diff(first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\ndef getColumns(elem,full):\n colsToKeep=[]\n cols = list(full.columns)\n params=diff(elem.params.full , elem.params.summary)\n common = diff(cols + [\"TYPE\",\"RUN\"] , ['OLDID'] + params) \n \n for field in common:\n if field in MKSTRFIELD:\n colsToKeep.append(field)\n if field 
in MKINTFIELD:\n colsToKeep.append(field)\n if field in MKREALFIELD:\n colsToKeep.append(field)\n if field in MKKEYFIELD:\n colsToKeep.append(field)\n if field in MKDATEFIELD:\n colsToKeep.append(field)\n if field in MKBOOLFIELD:\n colsToKeep.append(field)\n return(colsToKeep)\n\ndef createTableIfMissing(conn,elem,tableName,full):\n if not tableExists(conn,tableName):\n sql = \"CREATE TABLE %s (\" % tableName\n cols = list(full.columns)\n params=diff(elem.params.full , elem.params.summary)\n common = diff(cols + [\"TYPE\",\"RUN\"] , ['OLDID'] + params)\n\n sql += \"%sid INTEGER PRIMARY KEY\" % (tableName)\n start = \",\" \n\n for field in params:\n sql += \" %s\\n %s INTEGER\" % (start,field)\n start = \",\"\n\n for field in common:\n if field in MKSTRFIELD:\n sql += \"%s\\n %s TEXT\" % (start,field)\n if field in MKINTFIELD:\n sql += \"%s\\n %s INTEGER\" % (start,field)\n if field in MKREALFIELD:\n sql += \"%s\\n %s REAL\" % (start,field)\n if field in MKKEYFIELD:\n sql += \"%s\\n %s INTEGER\" % (start,MKKEYFIELDID[field])\n if field in MKDATEFIELD:\n sql += \"%s\\n %s TEXT\" % (start,field)\n if field in MKBOOLFIELD:\n sql += \"%s\\n %s INTEGER\" % (start,field)\n start = \",\"\n # Create foreign keys\n sql += \"%sFOREIGN KEY(typeid) REFERENCES TYPE(typeid),\" % start\n sql += \"FOREIGN KEY(categoryid) REFERENCES CATEGORY(categoryid),\"\n sql += \"FOREIGN KEY(testnameid) REFERENCES TESTNAME(testnameid),\"\n sql += \"FOREIGN KEY(testdateid) REFERENCES TESTDATE(testdateid),\"\n sql += \"FOREIGN KEY(platformid) REFERENCES PLATFORM(platformid),\"\n sql += \"FOREIGN KEY(coreid) REFERENCES CORE(coreid),\"\n sql += \"FOREIGN KEY(compilerid) REFERENCES COMPILER(compilerid)\"\n sql += \"FOREIGN KEY(runid) REFERENCES RUN(runid)\"\n sql += \" )\"\n conn.execute(sql)\n\n# Find the key or add it in a table\ndef findInTable(conn,table,keystr,strv,key):\n #print(sql)\n r = conn.execute(\"select %s from %s where %s=?\" % (key,table,keystr),(strv,))\n result=r.fetchone()\n if 
result != None:\n return(result[0])\n else:\n conn.execute(\"INSERT INTO %s(%s) VALUES(?)\" % (table,keystr),(strv,))\n conn.commit()\n r = conn.execute(\"select %s from %s where %s=?\" % (key,table,keystr),(strv,))\n result=r.fetchone()\n if result != None:\n #print(result)\n return(result[0])\n else:\n return(None)\n\ndef findInCompilerTable(conn,kind,version):\n #print(sql)\n r = conn.execute(\"select compilerid from COMPILER where compilerkindid=? AND version=?\" , (kind,version))\n result=r.fetchone()\n if result != None:\n return(result[0])\n else:\n fullDate = datetime.datetime.now()\n dateid = findInTable(conn,\"TESTDATE\",\"date\",str(fullDate),\"testdateid\")\n conn.execute(\"INSERT INTO COMPILER(compilerkindid,version,testdateid) VALUES(?,?,?)\" ,(kind,version,dateid))\n conn.commit()\n r = conn.execute(\"select compilerid from COMPILER where compilerkindid=? AND version=? AND testdateid=?\" , (kind,version,dateid))\n result=r.fetchone()\n if result != None:\n #print(result)\n return(result[0])\n else:\n return(None)\n\n\ndef addRows(conn,elem,tableName,full,runid=0):\n # List of columns we have in DB which is\n # different from the columns in the table\n compilerid = 0\n platformid = 0 \n coreid = 0\n keep = getColumns(elem,full)\n cols = list(full.columns)\n params=diff(elem.params.full , elem.params.summary)\n common = diff([\"TYPE\"] + cols , ['OLDID'] + params) \n colNameList = [] \n for c in params + keep:\n if c in MKKEYFIELD:\n colNameList.append(MKKEYFIELDID[c])\n else:\n colNameList.append(c)\n colNames = \"\".join(joinit(colNameList,\",\"))\n #print(colNameList)\n #print(colNames)\n #print(full)\n for index, row in full.iterrows():\n sql = \"INSERT INTO %s(%s) VALUES(\" % (tableName,colNames)\n keys = {}\n\n # Get data from columns\n for field in common:\n if field in VALSTRFIELD:\n keys[field]=row[field]\n if field == \"NAME\":\n name = row[field]\n if field == \"TESTNAME\":\n testname = row[field]\n if re.match(r'^.*_f64',testname):\n 
keys[\"TYPE\"] = \"f64\"\n if re.match(r'^.*_f32',testname):\n keys[\"TYPE\"] = \"f32\"\n if re.match(r'^.*_f16',testname):\n keys[\"TYPE\"] = \"f16\"\n if re.match(r'^.*_q31',testname):\n keys[\"TYPE\"] = \"q31\"\n if re.match(r'^.*_q15',testname):\n keys[\"TYPE\"] = \"q15\"\n if re.match(r'^.*_q7',testname):\n keys[\"TYPE\"] = \"q7\"\n\n if re.match(r'^.*_s8',testname):\n keys[\"TYPE\"] = \"s8\"\n if re.match(r'^.*_u8',testname):\n keys[\"TYPE\"] = \"u8\"\n if re.match(r'^.*_s16',testname):\n keys[\"TYPE\"] = \"s16\"\n if re.match(r'^.*_u16',testname):\n keys[\"TYPE\"] = \"u16\"\n if re.match(r'^.*_s32',testname):\n keys[\"TYPE\"] = \"s32\"\n if re.match(r'^.*_u32',testname):\n keys[\"TYPE\"] = \"u32\"\n if re.match(r'^.*_s64',testname):\n keys[\"TYPE\"] = \"s64\"\n if re.match(r'^.*_u64',testname):\n keys[\"TYPE\"] = \"u64\"\n \n if field in VALINTFIELD:\n keys[field]=row[field]\n if field in VALREALFIELD:\n keys[field]=row[field]\n if field in VALDATEFIELD:\n keys[field]=row[field]\n if field in VALBOOLFIELD:\n keys[field]=row[field]\n \n keys['RUN']=runid\n # Get foreign keys and create missing data\n for field in common:\n if field in VALKEYFIELD:\n if field == \"CATEGORY\":\n # Remove type extension to get category name so that\n # all types are maped to same category which will\n # help for post processing.\n testField=re.sub(r'^(.*)[:]([^:]+)(F16|F32|F64|Q31|Q15|Q7)$',r'\\1',row[field])\n val = findInTable(conn,\"CATEGORY\",\"category\",testField,\"categoryid\")\n keys[field]=val\n if field == \"NAME\":\n val = findInTable(conn,\"TESTNAME\",\"name\",row[field],\"testnameid\")\n keys[field]=val\n if field == \"DATE\":\n val = findInTable(conn,\"TESTDATE\",\"date\",str(row[field]),\"testdateid\")\n keys[field]=val\n if field == \"CORE\":\n val = findInTable(conn,\"CORE\",\"coredef\",row[field],\"coreid\")\n keys[field]=val\n coreid = val\n if field == \"PLATFORM\":\n val = findInTable(conn,\"PLATFORM\",\"platform\",row[field],\"platformid\")\n 
keys[field]=val\n platformid = val\n if field == \"TYPE\":\n val = findInTable(conn,\"TYPE\",\"type\",keys[\"TYPE\"],\"typeid\")\n keys[field]=val\n if field == \"COMPILER\":\n compilerkind = findInTable(conn,\"COMPILERKIND\",\"compiler\",row[field],\"compilerkindid\")\n compiler = findInCompilerTable(conn,compilerkind,keys[\"VERSION\"])\n keys[field]=compiler\n compilerid = compiler\n\n # Generate sql command\n start = \"\" \n for field in params:\n sql += \" %s\\n %d\" % (start,row[field])\n start = \",\"\n \n for field in keep:\n if field in MKSTRFIELD or field in MKDATEFIELD:\n sql += \" %s\\n \\\"%s\\\"\" % (start,keys[field])\n elif field in keep:\n if field in VALREALFIELD:\n sql += \" %s\\n %f\" % (start,keys[field])\n else:\n sql += \" %s\\n %d\" % (start,keys[field])\n start = \",\"\n\n sql += \" )\"\n #print(sql)\n conn.execute(sql) \n conn.commit() \n return({'compilerid':compilerid,'platformid':platformid,'coreid':coreid})\n\ndef addConfig(conn,config,fullDate):\n dateid = findInTable(conn,\"TESTDATE\",\"date\",str(fullDate),\"testdateid\")\n conn.execute(\"INSERT INTO CONFIG(compilerid,platformid,coreid,testdateid) VALUES(?,?,?,?)\" ,(config['compilerid'],config['platformid'],config['coreid'],dateid))\n conn.commit()\n\ndef getGroup(a):\n return(re.sub(r'^(.+)(F64|F32|F16|Q31|Q15|Q7|U32|U16|U8|S32|S16|S8)$',r'\\1',a))\n\ndef addOneBenchmark(elem,fullPath,db,group,runid):\n if os.path.isfile(fullPath):\n full=pd.read_csv(fullPath,dtype={'OLDID': str} ,keep_default_na = False)\n fullDate = datetime.datetime.now()\n full['DATE'] = fullDate\n if group:\n tableName = getGroup(group)\n else:\n tableName = getGroup(elem.data[\"class\"])\n conn = sqlite3.connect(db)\n createTableIfMissing(conn,elem,tableName,full)\n config = addRows(conn,elem,tableName,full,runid)\n addConfig(conn,config,fullDate)\n conn.close()\n\n\ndef addToDB(benchmark,dbpath,elem,group,runid):\n if not elem.data[\"deprecated\"]:\n if elem.params:\n benchPath = 
os.path.join(benchmark,elem.fullPath(),\"regression.csv\")\n print(\"Processing %s\" % benchPath)\n addOneBenchmark(elem,benchPath,dbpath,group,runid)\n \n for c in elem.children:\n addToDB(benchmark,dbpath,c,group,runid)\n\n\n\nparser = argparse.ArgumentParser(description='Generate summary benchmarks')\n\nparser.add_argument('-f', nargs='?',type = str, default=\"Output.pickle\", help=\"Pickle path\")\nparser.add_argument('-b', nargs='?',type = str, default=\"FullBenchmark\", help=\"Full Benchmark dir path\")\n#parser.add_argument('-e', action='store_true', help=\"Embedded test\")\nparser.add_argument('-o', nargs='?',type = str, default=\"reg.db\", help=\"Regression benchmark database\")\nparser.add_argument('-r', nargs='?',type = int, default=0, help=\"Run ID\")\n\nparser.add_argument('others', nargs=argparse.REMAINDER, help=\"Suite class\")\n\nargs = parser.parse_args()\n\nif args.f is not None:\n #p = parse.Parser()\n # Parse the test description file\n #root = p.parse(args.f)\n root=parse.loadRoot(args.f)\n d.deprecate(root,args.others)\n if args.others:\n group=args.others[0] \n else:\n group=None\n addToDB(args.b,args.o,root,group,args.r)\n \nelse:\n parser.print_help()"
] | [
[
"sklearn.naive_bayes.GaussianNB",
"matplotlib.font_manager.FontProperties",
"numpy.ones",
"numpy.concatenate",
"numpy.size",
"numpy.random.randn",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.text",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"numpy.log",
"numpy.sum",
"numpy.abs",
"numpy.ones",
"numpy.round",
"numpy.std",
"numpy.argmax",
"numpy.argmin",
"numpy.random.randn",
"numpy.random.rand",
"numpy.average",
"numpy.var",
"scipy.stats.entropy",
"numpy.array",
"numpy.zeros",
"scipy.special.logsumexp"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
allen050883/Project | [
"22a9f1e466e595d8808e59fc58801881f3399df4"
] | [
"kaggle_SIIM-ACR_Pneumothorax_Segmentation/utils/loss.py"
] | [
"import torch\nimport torch.nn.functional as F\n\ndef dice_score(inputs, targets, smooth=1):\n # Flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum() \n dice_score = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth) \n \n return dice_score\n\ndef get_dice_loss(inputs, targets, smooth=1):\n # Flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum() \n dice_loss = 1 - (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth) \n \n return dice_loss\n\ndef get_focal_loss(inputs, targets, alpha=0.8, gamma=2): \n # Flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n # First compute binary cross-entropy \n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n BCE_EXP = torch.exp(-BCE)\n focal_loss = alpha * (1-BCE_EXP)**gamma * BCE\n \n return focal_loss\n\ndef combo_loss(inputs, targets):\n dice_loss = get_dice_loss(inputs, targets)\n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n focal_loss = get_focal_loss(inputs, targets)\n \n return focal_loss - torch.log(1-dice_loss)\n"
] | [
[
"torch.exp",
"torch.nn.functional.binary_cross_entropy",
"torch.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrsiegfried/read-ICESat-2 | [
"1406b92691d284616ca6c9d72646eca4592d1f1d"
] | [
"scripts/interp_sea_level_ICESat2_ATL07.py"
] | [
"#!/usr/bin/env python\nu\"\"\"\ninterp_sea_level_ICESat2_ATL07.py\nWritten by Tyler Sutterley (05/2021)\nInterpolates sea level anomalies (sla), absolute dynamic topography (adt) and\n mean dynamic topography (mdt) to times and locations of ICESat-2 ATL07 data\n\nhttps://www.aviso.altimetry.fr/en/data/products/sea-surface-height-products/\n global/msla-h.html\nftp://ftp.sltac.cls.fr/Core/SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047/\n dataset-duacs-rep-global-merged-allsat-phy-l4-v3\n\nNote that the AVISO sea level data are gzip compressed netCDF4 files\n\nCOMMAND LINE OPTIONS:\n -D X, --directory X: Working data directory\n -V, --verbose: Output information about each created file\n -M X, --mode X: Permission mode of directories and files created\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n pyproj: Python interface to PROJ library\n https://pypi.org/project/pyproj/\n scikit-learn: Machine Learning in Python\n https://scikit-learn.org/stable/index.html\n https://github.com/scikit-learn/scikit-learn\n h5py: Python interface for Hierarchal Data Format 5 (HDF5)\n https://h5py.org\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n\nPROGRAM DEPENDENCIES:\n read_ICESat2_ATL07.py: reads ICESat-2 sea ice height data files\n time.py: utilities for calculating time operations\n utilities.py: download and management utilities for syncing files\n\nUPDATE HISTORY:\n Updated 05/2021: print full path of output filename\n Written 03/2021\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport re\nimport gzip\nimport h5py\nimport pyproj\nimport netCDF4\nimport argparse\nimport datetime\nimport numpy as np\nimport sklearn.neighbors\nimport icesat2_toolkit.time\nfrom icesat2_toolkit.read_ICESat2_ATL07 import read_HDF5_ATL07\n\n#-- PURPOSE: set the hemisphere of interest based on the granule\ndef 
set_hemisphere(GRANULE):\n if GRANULE in ('10','11','12'):\n projection_flag = 'S'\n elif GRANULE in ('03','04','05'):\n projection_flag = 'N'\n return projection_flag\n\n#-- PURPOSE: interpolates to coordinates with inverse distance weighting\ndef inverse_distance(x, y, z, xi, yi, SEARCH='BallTree', N=10, POWER=2.0):\n #-- number of output points\n npts = len(xi)\n #-- create neighbors object for coordinates\n if (SEARCH == 'BallTree'):\n tree = sklearn.neighbors.BallTree(np.c_[x,y])\n elif (SEARCH == 'KDTree'):\n tree = sklearn.neighbors.KDTree(np.c_[x,y])\n #-- query the search tree to find the N closest points\n dist,indices = tree.query(np.c_[xi,yi], k=N, return_distance=True)\n #-- normalized weights if POWER > 0 (typically between 1 and 3)\n #-- in the inverse distance weighting\n power_inverse_distance = dist**(-POWER)\n s = np.sum(power_inverse_distance, axis=1)\n w = power_inverse_distance/np.broadcast_to(s[:,None],(npts,N))\n #-- calculate interpolated fields by inverse distance weighting\n return np.sum(w*z[indices],axis=1)\n\n#-- PURPOSE interpolate sea level anomalies to lat/lon and then to time\ndef interpolate_sea_level(base_dir, xi, yi, CJD, HEM):\n #-- EPSG projections for converting lat/lon to polar stereographic\n EPSG = dict(N=3413,S=3031)\n #-- pyproj transformer for converting to polar stereographic\n crs1 = pyproj.CRS.from_string('epsg:4326')\n crs2 = pyproj.CRS.from_string(EPSG[HEM])\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n\n #-- interpolate mean dynamic topography\n input_file = 'mdt_cnes_cls2013_global.nc.gz'\n #-- read bytes from compressed file\n fd = gzip.open(os.path.join(base_dir,input_file),'rb')\n #-- dictionary with input fields\n dinput = {}\n #-- read netCDF file for mean dynamic topography\n with netCDF4.Dataset('mdt', mode='r', memory=fd.read()) as fileID:\n dinput['lon'] = fileID['lon'][:].copy()\n dinput['lat'] = fileID['lat'][:].copy()\n dinput['mdt'] = 
np.ma.array(fileID['mdt'][0,:,:].copy(),\n fill_value=fileID['mdt']._FillValue)\n dinput['mdt'].mask = (dinput['mdt'].data == dinput['mdt'].fill_value)\n #-- close the compressed file objects\n fd.close()\n #-- create 2-D grid coordinates from longitude and latitude vectors\n gridlon,gridlat = np.meshgrid(dinput['lon'],dinput['lat'])\n #-- convert from latitude/longitude into polar stereographic\n xg,yg = transformer.transform(gridlon,gridlat)\n\n #-- reduce to local coordinates to improve computational time\n gridmask = np.logical_not(dinput['mdt'].mask)\n if (HEM.upper() == 'N'):\n gridmask &= (gridlat >= 50.0)\n elif (HEM.upper() == 'S'):\n gridmask &= (gridlat <= -50.0)\n indy,indx = np.nonzero(gridmask)\n #-- calculate mean dynamic topography by inverse distance weighting\n MDT = inverse_distance(xg[indy,indx], yg[indy,indx],\n dinput['mdt'].data[indy,indx], xi, yi)\n\n #-- CNES Julian Days before and after measurement\n CJD1 = np.floor(CJD)\n #-- scale for linearly interpolating to date\n dt = (CJD - CJD1[0])\n #-- output sea level anomaly and absolute dynamic topography\n SLA = np.zeros_like(CJD)\n ADT = np.zeros_like(CJD)\n #-- for the range of dates\n for day in range(2):\n #-- convert from CNES Julians Days to calendar dates for time\n JD1 = CJD1 + day + 2433282.5\n YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(JD1[0],\n FORMAT='tuple', ASTYPE=int)\n #-- sea level directory\n ddir = os.path.join(base_dir, '{0:0.0f}'.format(YY))\n #-- input file for day before the measurement\n regex = re.compile(('dt_global_allsat_phy_l4_{0:4d}{1:02d}{2:02d}_'\n '(\\d{{4}})(\\d{{2}})(\\d{{2}}).nc.gz').format(YY,MM,DD))\n input_file, = [fi for fi in os.listdir(ddir) if regex.match(fi)]\n #-- dictionary with input fields\n dinput = {}\n #-- read bytes from compressed file\n fd = gzip.open(os.path.join(ddir,input_file),'rb')\n #-- read netCDF file for time\n with netCDF4.Dataset('sla', mode='r', memory=fd.read()) as fileID:\n dinput['lon'] = 
fileID['lon'][:].copy()\n dinput['lat'] = fileID['lat'][:].copy()\n dinput['sla'] = np.ma.array(fileID['sla'][0,:,:].copy(),\n fill_value=fileID['sla']._FillValue)\n dinput['adt'] = np.ma.array(fileID['adt'][0,:,:].copy(),\n fill_value=fileID['adt']._FillValue)\n #-- close the compressed file objects\n fd.close()\n #-- for each variable to interpolate\n out = {}\n for var in ['sla','adt']:\n #-- reduce to local coordinates to improve computational time\n gridmask = np.logical_not(dinput[var].mask)\n if (HEM.upper() == 'N'):\n gridmask &= (gridlat >= 50.0)\n elif (HEM.upper() == 'S'):\n gridmask &= (gridlat <= -50.0)\n indy,indx = np.nonzero(gridmask)\n #-- calculate variable by inverse distance weighting\n out[var] = inverse_distance(xg[indy,indx], yg[indy,indx],\n dinput[var].data[indy,indx], xi, yi)\n #-- linearly interpolate to date for iteration\n SLA += out['sla']*(2.0*dt*day - dt - day + 1.0)\n ADT += out['adt']*(2.0*dt*day - dt - day + 1.0)\n #-- return interpolated values\n return dict(h_mdt=MDT,h_sla=SLA,h_adt=ADT)\n\n#-- PURPOSE: read ICESat-2 sea ice height (ATL07) from NSIDC\n#-- interpolate AVISO sea level at points and times\ndef interp_sea_level_ICESat2(base_dir, FILE, VERBOSE=False, MODE=0o775):\n\n #-- read data from input_file\n print('{0} -->'.format(os.path.basename(FILE))) if VERBOSE else None\n IS2_atl07_mds,IS2_atl07_attrs,IS2_atl07_beams = read_HDF5_ATL07(FILE,\n ATTRIBUTES=True)\n DIRECTORY = os.path.dirname(FILE)\n #-- extract parameters from ICESat-2 ATLAS HDF5 sea ice file name\n rx = re.compile(r'(processed_)?(ATL\\d{2})-(\\d{2})_(\\d{4})(\\d{2})(\\d{2})'\n r'(\\d{2})(\\d{2})(\\d{2})_(\\d{4})(\\d{2})(\\d{2})_(\\d{3})_(\\d{2})(.*?).h5$')\n SUB,PRD,HMN,YY,MM,DD,HH,MN,SS,TRK,CYCL,SN,RL,VERS,AUX=rx.findall(FILE).pop()\n #-- set the hemisphere flag based on ICESat-2 granule\n HEM = set_hemisphere(HMN)\n\n #-- HDF5 file attributes\n attrib = {}\n #-- mean dynamic topography\n attrib['h_mdt'] = {}\n attrib['h_mdt']['long_name'] = 'Mean Dynamic 
Topography'\n attrib['h_mdt']['description'] = 'Sea surface height above geoid'\n attrib['h_mdt']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n #-- sea level anomalies\n attrib['h_sla'] = {}\n attrib['h_sla']['long_name'] = 'Sea Level Anomaly'\n attrib['h_sla']['description'] = 'Sea surface anomalies'\n attrib['h_sla']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n #-- absolute dynamic topography\n attrib['h_adt'] = {}\n attrib['h_adt']['long_name'] = 'Absolute Dynamic Topography'\n attrib['h_adt']['description'] = ('Sea surface height above geoid calculated '\n 'by adding the mean dynamic topography to the sea level anomalies')\n attrib['h_adt']['reference'] = ('https://www.aviso.altimetry.fr/en/data/'\n 'products/sea-surface-height-products/global/msla-h.html')\n\n #-- EPSG projections for converting lat/lon to polar stereographic\n EPSG = dict(N=3413,S=3031)\n #-- pyproj transformer for converting to polar stereographic\n crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(EPSG[HEM]))\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n\n #-- number of GPS seconds between the GPS epoch\n #-- and ATLAS Standard Data Product (SDP) epoch\n atlas_sdp_gps_epoch = IS2_atl07_mds['ancillary_data']['atlas_sdp_gps_epoch']\n\n #-- copy variables for outputting to HDF5 file\n IS2_atl07_corr = {}\n IS2_atl07_fill = {}\n IS2_atl07_dims = {}\n IS2_atl07_corr_attrs = {}\n #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)\n #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)\n #-- Add this value to delta time parameters to compute full gps_seconds\n IS2_atl07_corr['ancillary_data'] = {}\n IS2_atl07_corr_attrs['ancillary_data'] = {}\n for key in ['atlas_sdp_gps_epoch']:\n #-- get each HDF5 variable\n 
IS2_atl07_corr['ancillary_data'][key] = IS2_atl07_mds['ancillary_data'][key]\n #-- Getting attributes of group and included variables\n IS2_atl07_corr_attrs['ancillary_data'][key] = {}\n for att_name,att_val in IS2_atl07_attrs['ancillary_data'][key].items():\n IS2_atl07_corr_attrs['ancillary_data'][key][att_name] = att_val\n #-- for each input beam within the file\n for gtx in sorted(IS2_atl07_beams):\n #-- output data dictionaries for beam\n IS2_atl07_corr[gtx] = dict(sea_ice_segments={})\n IS2_atl07_fill[gtx] = dict(sea_ice_segments={})\n IS2_atl07_dims[gtx] = dict(sea_ice_segments={})\n IS2_atl07_corr_attrs[gtx] = dict(sea_ice_segments={})\n\n #-- number of segments\n val = IS2_atl07_mds[gtx]['sea_ice_segments']\n n_seg = len(val['height_segment_id'])\n\n #-- convert time from ATLAS SDP to CNES JD\n #-- days relative to 1950-01-01T00:00:00\n gps_seconds = atlas_sdp_gps_epoch + val['delta_time']\n leap_seconds = icesat2_toolkit.time.count_leap_seconds(gps_seconds)\n cnes_time = icesat2_toolkit.time.convert_delta_time(gps_seconds-leap_seconds,\n epoch1=(1980,1,6,0,0,0), epoch2=(1950,1,1,0,0,0), scale=1.0/86400.0)\n\n #-- extract lat/lon and convert to polar stereographic\n X,Y = transformer.transform(val['longitude'],val['latitude'])\n\n #-- interpolate sea level anomalies and dynamic topographies\n interp = interpolate_sea_level(base_dir,X,Y,cnes_time,HEM)\n\n #-- group attributes for beam\n IS2_atl07_corr_attrs[gtx]['Description'] = IS2_atl07_attrs[gtx]['Description']\n IS2_atl07_corr_attrs[gtx]['atlas_pce'] = IS2_atl07_attrs[gtx]['atlas_pce']\n IS2_atl07_corr_attrs[gtx]['atlas_beam_type'] = IS2_atl07_attrs[gtx]['atlas_beam_type']\n IS2_atl07_corr_attrs[gtx]['groundtrack_id'] = IS2_atl07_attrs[gtx]['groundtrack_id']\n IS2_atl07_corr_attrs[gtx]['atmosphere_profile'] = IS2_atl07_attrs[gtx]['atmosphere_profile']\n IS2_atl07_corr_attrs[gtx]['atlas_spot_number'] = IS2_atl07_attrs[gtx]['atlas_spot_number']\n IS2_atl07_corr_attrs[gtx]['sc_orientation'] = 
IS2_atl07_attrs[gtx]['sc_orientation']\n #-- group attributes for sea_ice_segments\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['Description'] = (\"Top group for sea \"\n \"ice segments as computed by the ATBD algorithm.\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['data_rate'] = (\"Data within this \"\n \"group are stored at the variable segment rate.\")\n\n #-- geolocation, time and segment ID\n #-- delta time\n IS2_atl07_corr[gtx]['sea_ice_segments']['delta_time'] = val['delta_time'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['delta_time'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['delta_time'] = None\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['units'] = \"seconds since 2018-01-01\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['long_name'] = \"Elapsed GPS seconds\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['standard_name'] = \"time\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['source'] = \"telemetry\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['calendar'] = \"standard\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['description'] = (\"Number of \"\n \"GPS seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch \"\n \"offset is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS \"\n \"seconds between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP \"\n \"epoch. 
By adding the offset contained within atlas_sdp_gps_epoch to delta time \"\n \"parameters, the time in gps_seconds relative to the GPS epoch can be computed.\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['delta_time']['coordinates'] = \\\n \"height_segment_id latitude longitude\"\n #-- latitude\n IS2_atl07_corr[gtx]['sea_ice_segments']['latitude'] = val['latitude'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['latitude'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['latitude'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['units'] = \"degrees_north\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['long_name'] = \"Latitude\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['standard_name'] = \"latitude\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['description'] = (\"Latitude of \"\n \"segment center\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['valid_min'] = -90.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['valid_max'] = 90.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['latitude']['coordinates'] = \\\n \"height_segment_id delta_time longitude\"\n #-- longitude\n IS2_atl07_corr[gtx]['sea_ice_segments']['longitude'] = val['longitude'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['longitude'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['longitude'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['units'] = \"degrees_east\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['contentType'] = \"physicalMeasurement\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['long_name'] = \"Longitude\"\n 
IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['standard_name'] = \"longitude\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['description'] = (\"Longitude of \"\n \"segment center\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['valid_min'] = -180.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['valid_max'] = 180.0\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['longitude']['coordinates'] = \\\n \"height_segment_id delta_time latitude\"\n #-- segment ID\n IS2_atl07_corr[gtx]['sea_ice_segments']['height_segment_id'] = val['height_segment_id']\n IS2_atl07_fill[gtx]['sea_ice_segments']['height_segment_id'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['height_segment_id'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['units'] = \"1\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['long_name'] = \\\n \"Identifier of each height segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['description'] = \\\n \"Identifier of each height segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['height_segment_id']['coordinates'] = \\\n \"delta_time latitude longitude\"\n #-- geolocation segment beginning\n IS2_atl07_corr[gtx]['sea_ice_segments']['geoseg_beg'] = val['geoseg_beg'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['geoseg_beg'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['geoseg_beg'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['units'] = \"1\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['long_name'] = 
\"Beginning GEOSEG\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['description'] = \\\n \"Geolocation segment (geoseg) ID associated with the first photon used in this sea ice segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_beg']['coordinates'] = \\\n \"height_segment_id delta_time latitude longitude\"\n #-- geolocation segment ending\n IS2_atl07_corr[gtx]['sea_ice_segments']['geoseg_end'] = val['geoseg_end'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['geoseg_end'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['geoseg_end'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['units'] = \"1\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['long_name'] = \"Ending GEOSEG\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['description'] = \\\n \"Geolocation segment (geoseg) ID associated with the last photon used in this sea ice segment\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geoseg_end']['coordinates'] = \\\n \"height_segment_id delta_time latitude longitude\"\n #-- along track distance\n IS2_atl07_corr[gtx]['sea_ice_segments']['seg_dist_x'] = val['seg_dist_x'].copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['seg_dist_x'] = None\n IS2_atl07_dims[gtx]['sea_ice_segments']['seg_dist_x'] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['units'] = \"meters\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['long_name'] = \"Along track distance\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['description'] = \\\n \"Along-track distance from the equator crossing to the 
segment center.\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['seg_dist_x']['coordinates'] = \\\n \"height_segment_id delta_time latitude longitude\"\n\n #-- geophysical variables\n IS2_atl07_corr[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_fill[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_dims[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical']['Description'] = (\"Contains geophysical \"\n \"parameters and corrections used to correct photon heights for geophysical effects, such as tides.\")\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical']['data_rate'] = (\"Data within this group \"\n \"are stored at the sea_ice_height segment rate.\")\n\n #-- interpolated sea level products\n for key,val in interp.items():\n #-- copy output variables\n sea_level = np.ma.zeros((n_seg))\n sea_level.data[:] = np.copy(val)\n #-- replace nan values with fill value\n sea_level.mask = np.isnan(sea_level.data)\n sea_level.data[sea_level.mask] = sea_level.fill_value\n #-- add to output\n IS2_atl07_corr[gtx]['sea_ice_segments']['geophysical'][key] = sea_level.copy()\n IS2_atl07_fill[gtx]['sea_ice_segments']['geophysical'][key] = sea_level.fill_value\n IS2_atl07_dims[gtx]['sea_ice_segments']['geophysical'][key] = ['delta_time']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key] = {}\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['units'] = \"meters\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['contentType'] = \"referenceInformation\"\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['long_name'] = attrib[key]['long_name']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['description'] = attrib[key]['description']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['source'] = 'AVISO/Copernicus'\n 
IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['reference'] = attrib[key]['reference']\n IS2_atl07_corr_attrs[gtx]['sea_ice_segments']['geophysical'][key]['coordinates'] = \\\n \"../height_segment_id ../delta_time ../latitude ../longitude\"\n\n #-- output HDF5 files with interpolated sea level data\n fargs = (PRD,HEM,'AVISO_SEA_LEVEL',YY,MM,DD,HH,MN,SS,TRK,CYCL,SN,RL,VERS,AUX)\n file_format = '{0}-{1}_{2}_{3}{4}{5}{6}{7}{8}_{9}{10}{11}_{12}_{13}{14}.h5'\n output_file = os.path.join(DIRECTORY,file_format.format(*fargs))\n #-- print file information\n print('\\t{0}'.format(output_file)) if VERBOSE else None\n HDF5_ATL07_corr_write(IS2_atl07_corr, IS2_atl07_corr_attrs,\n CLOBBER=True, INPUT=os.path.basename(FILE),\n FILL_VALUE=IS2_atl07_fill, DIMENSIONS=IS2_atl07_dims,\n FILENAME=output_file)\n #-- change the permissions mode\n os.chmod(output_file, MODE)\n\n#-- PURPOSE: outputting the correction values for ICESat-2 data to HDF5\ndef HDF5_ATL07_corr_write(IS2_atl07_corr, IS2_atl07_attrs, INPUT=None,\n FILENAME='', FILL_VALUE=None, DIMENSIONS=None, CLOBBER=False):\n #-- setting HDF5 clobber attribute\n if CLOBBER:\n clobber = 'w'\n else:\n clobber = 'w-'\n\n #-- open output HDF5 file\n fileID = h5py.File(os.path.expanduser(FILENAME), clobber)\n\n #-- create HDF5 records\n h5 = {}\n\n #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)\n #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)\n h5['ancillary_data'] = {}\n for k,v in IS2_atl07_corr['ancillary_data'].items():\n #-- Defining the HDF5 dataset variables\n val = 'ancillary_data/{0}'.format(k)\n h5['ancillary_data'][k] = fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- add HDF5 variable attributes\n for att_name,att_val in IS2_atl07_attrs['ancillary_data'][k].items():\n h5['ancillary_data'][k].attrs[att_name] = att_val\n\n #-- write each output beam\n beams = [k for k in IS2_atl07_corr.keys() if 
bool(re.match(r'gt\\d[lr]',k))]\n for gtx in beams:\n fileID.create_group(gtx)\n #-- add HDF5 group attributes for beam\n for att_name in ['Description','atlas_pce','atlas_beam_type',\n 'groundtrack_id','atmosphere_profile','atlas_spot_number',\n 'sc_orientation']:\n fileID[gtx].attrs[att_name] = IS2_atl07_attrs[gtx][att_name]\n #-- create sea_ice_segments group\n fileID[gtx].create_group('sea_ice_segments')\n h5[gtx] = dict(sea_ice_segments={})\n for att_name in ['Description','data_rate']:\n att_val = IS2_atl07_attrs[gtx]['sea_ice_segments'][att_name]\n fileID[gtx]['sea_ice_segments'].attrs[att_name] = att_val\n\n #-- delta_time, geolocation and segment identification variables\n for k in ['delta_time','latitude','longitude','height_segment_id',\n 'geoseg_beg','geoseg_end','seg_dist_x']:\n #-- values and attributes\n v = IS2_atl07_corr[gtx]['sea_ice_segments'][k]\n attrs = IS2_atl07_attrs[gtx]['sea_ice_segments'][k]\n fillvalue = FILL_VALUE[gtx]['sea_ice_segments'][k]\n #-- Defining the HDF5 dataset variables\n val = '{0}/{1}/{2}'.format(gtx,'sea_ice_segments',k)\n if fillvalue:\n h5[gtx]['sea_ice_segments'][k] = fileID.create_dataset(val,\n np.shape(v), data=v, dtype=v.dtype, fillvalue=fillvalue,\n compression='gzip')\n else:\n h5[gtx]['sea_ice_segments'][k] = fileID.create_dataset(val,\n np.shape(v), data=v, dtype=v.dtype, compression='gzip')\n #-- create or attach dimensions for HDF5 variable\n if DIMENSIONS[gtx]['sea_ice_segments'][k]:\n #-- attach dimensions\n for i,dim in enumerate(DIMENSIONS[gtx]['sea_ice_segments'][k]):\n h5[gtx]['sea_ice_segments'][k].dims[i].attach_scale(\n h5[gtx]['sea_ice_segments'][dim])\n else:\n #-- make dimension\n h5[gtx]['sea_ice_segments'][k].make_scale(k)\n #-- add HDF5 variable attributes\n for att_name,att_val in attrs.items():\n h5[gtx]['sea_ice_segments'][k].attrs[att_name] = att_val\n\n #-- add to geophysical corrections\n key = 'geophysical'\n fileID[gtx]['sea_ice_segments'].create_group(key)\n 
h5[gtx]['sea_ice_segments'][key] = {}\n for att_name in ['Description','data_rate']:\n att_val=IS2_atl07_attrs[gtx]['sea_ice_segments'][key][att_name]\n fileID[gtx]['sea_ice_segments'][key].attrs[att_name] = att_val\n for k,v in IS2_atl07_corr[gtx]['sea_ice_segments'][key].items():\n #-- attributes\n attrs = IS2_atl07_attrs[gtx]['sea_ice_segments'][key][k]\n fillvalue = FILL_VALUE[gtx]['sea_ice_segments'][key][k]\n #-- Defining the HDF5 dataset variables\n val = '{0}/{1}/{2}/{3}'.format(gtx,'sea_ice_segments',key,k)\n if fillvalue:\n h5[gtx]['sea_ice_segments'][key][k] = \\\n fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, fillvalue=fillvalue, compression='gzip')\n else:\n h5[gtx]['sea_ice_segments'][key][k] = \\\n fileID.create_dataset(val, np.shape(v), data=v,\n dtype=v.dtype, compression='gzip')\n #-- attach dimensions\n for i,dim in enumerate(DIMENSIONS[gtx]['sea_ice_segments'][key][k]):\n h5[gtx]['sea_ice_segments'][key][k].dims[i].attach_scale(\n h5[gtx]['sea_ice_segments'][dim])\n #-- add HDF5 variable attributes\n for att_name,att_val in attrs.items():\n h5[gtx]['sea_ice_segments'][key][k].attrs[att_name] = att_val\n\n #-- HDF5 file title\n fileID.attrs['featureType'] = 'trajectory'\n fileID.attrs['title'] = 'ATLAS/ICESat-2 L3A Sea Ice Height'\n fileID.attrs['summary'] = ('Estimates of the sea ice correction parameters '\n 'needed to interpret and assess the quality of sea height estimates.')\n fileID.attrs['description'] = ('The data set (ATL07) contains along-track '\n 'heights for sea ice and open water leads (at varying length scales) '\n 'relative to the WGS84 ellipsoid (ITRF2014 reference frame) after '\n 'adjustment for geoidal and tidal variations, and inverted barometer '\n 'effects.')\n date_created = datetime.datetime.today()\n fileID.attrs['date_created'] = date_created.isoformat()\n project = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'\n fileID.attrs['project'] = project\n platform = 'ICESat-2 > Ice, Cloud, and land 
Elevation Satellite-2'\n fileID.attrs['project'] = platform\n #-- add attribute for elevation instrument and designated processing level\n instrument = 'ATLAS > Advanced Topographic Laser Altimeter System'\n fileID.attrs['instrument'] = instrument\n fileID.attrs['source'] = 'Spacecraft'\n fileID.attrs['references'] = 'https://nsidc.org/data/icesat-2'\n fileID.attrs['processing_level'] = '4'\n #-- add attributes for input ATL07 file\n fileID.attrs['input_files'] = os.path.basename(INPUT)\n #-- find geospatial and temporal ranges\n lnmn,lnmx,ltmn,ltmx,tmn,tmx = (np.inf,-np.inf,np.inf,-np.inf,np.inf,-np.inf)\n for gtx in beams:\n lon = IS2_atl07_corr[gtx]['sea_ice_segments']['longitude']\n lat = IS2_atl07_corr[gtx]['sea_ice_segments']['latitude']\n delta_time = IS2_atl07_corr[gtx]['sea_ice_segments']['delta_time']\n #-- setting the geospatial and temporal ranges\n lnmn = lon.min() if (lon.min() < lnmn) else lnmn\n lnmx = lon.max() if (lon.max() > lnmx) else lnmx\n ltmn = lat.min() if (lat.min() < ltmn) else ltmn\n ltmx = lat.max() if (lat.max() > ltmx) else ltmx\n tmn = delta_time.min() if (delta_time.min() < tmn) else tmn\n tmx = delta_time.max() if (delta_time.max() > tmx) else tmx\n #-- add geospatial and temporal attributes\n fileID.attrs['geospatial_lat_min'] = ltmn\n fileID.attrs['geospatial_lat_max'] = ltmx\n fileID.attrs['geospatial_lon_min'] = lnmn\n fileID.attrs['geospatial_lon_max'] = lnmx\n fileID.attrs['geospatial_lat_units'] = \"degrees_north\"\n fileID.attrs['geospatial_lon_units'] = \"degrees_east\"\n fileID.attrs['geospatial_ellipsoid'] = \"WGS84\"\n fileID.attrs['date_type'] = 'UTC'\n fileID.attrs['time_type'] = 'CCSDS UTC-A'\n #-- convert start and end time from ATLAS SDP seconds into GPS seconds\n atlas_sdp_gps_epoch=IS2_atl07_corr['ancillary_data']['atlas_sdp_gps_epoch']\n gps_seconds = atlas_sdp_gps_epoch + np.array([tmn,tmx])\n #-- calculate leap seconds\n leaps = icesat2_toolkit.time.count_leap_seconds(gps_seconds)\n #-- convert from seconds 
since 1980-01-06T00:00:00 to Modified Julian days\n MJD = icesat2_toolkit.time.convert_delta_time(gps_seconds - leaps,\n epoch1=(1980,1,6,0,0,0), epoch2=(1858,11,17,0,0,0), scale=1.0/86400.0)\n #-- convert to calendar date\n YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(MJD + 2400000.5,\n FORMAT='tuple')\n #-- add attributes with measurement date start, end and duration\n tcs = datetime.datetime(int(YY[0]), int(MM[0]), int(DD[0]),\n int(HH[0]), int(MN[0]), int(SS[0]), int(1e6*(SS[0] % 1)))\n fileID.attrs['time_coverage_start'] = tcs.isoformat()\n tce = datetime.datetime(int(YY[1]), int(MM[1]), int(DD[1]),\n int(HH[1]), int(MN[1]), int(SS[1]), int(1e6*(SS[1] % 1)))\n fileID.attrs['time_coverage_end'] = tce.isoformat()\n fileID.attrs['time_coverage_duration'] = '{0:0.0f}'.format(tmx-tmn)\n #-- Closing the HDF5 file\n fileID.close()\n\n#-- Main program that calls interp_sea_level_ICESat2()\ndef main():\n #-- Read the system arguments listed after the program\n parser = argparse.ArgumentParser(\n description=\"\"\"Interpolates AVISO sea level anomalies, absolute\n dynamic topography and mean dynamic topography to ICESat-2\n ATL07 sea ice height data\n \"\"\"\n )\n #-- command line parameters\n parser.add_argument('infile',\n type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='+',\n help='ICESat-2 ATL07 file to run')\n #-- directory with sea level data\n parser.add_argument('--directory','-D',\n type=lambda p: os.path.abspath(os.path.expanduser(p)),\n default=os.getcwd(),\n help='Working data directory')\n #-- verbosity settings\n #-- verbose will output information about each output file\n parser.add_argument('--verbose','-V',\n default=False, action='store_true',\n help='Output information about each created file')\n #-- permissions mode of the local files (number in octal)\n parser.add_argument('--mode','-M',\n type=lambda x: int(x,base=8), default=0o775,\n help='Permission mode of directories and files created')\n args = parser.parse_args()\n\n 
#-- run for each input ATL07 file\n for FILE in args.infile:\n interp_sea_level_ICESat2(args.directory, FILE,\n VERBOSE=args.verbose, MODE=args.mode)\n\n#-- run main program\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.logical_not",
"numpy.nonzero",
"numpy.isnan",
"numpy.copy",
"numpy.zeros_like",
"numpy.broadcast_to",
"numpy.floor",
"numpy.ma.zeros",
"numpy.shape",
"numpy.array",
"numpy.meshgrid",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FDU-VTS/Person-Search | [
"36a1eab8d8fdf149e32dece030edff02dbc8a915"
] | [
"models/reid.py"
] | [
"# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: [email protected]\n\"\"\"\n\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.utils import model_zoo\n\nfrom models.context_block import *\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\nmodel_layers = {\n 'resnet50': [3, 4, 6, 3],\n 'resnet101': [3, 4, 23, 3]\n}\n\n__all__ = ['ResNet', 'Bottleneck']\n\n\nclass IBN(nn.Module):\n \"\"\"\n IBN with BN:IN = 7:1\n \"\"\"\n\n def __init__(self, planes):\n super(IBN, self).__init__()\n half1 = int(planes / 8)\n self.half = half1\n half2 = planes - half1\n self.IN = nn.InstanceNorm2d(half1, affine=True)\n self.BN = nn.BatchNorm2d(half2)\n\n def forward(self, x):\n split = torch.split(x, self.half, dim=1)\n out1 = self.IN(split[0].contiguous())\n out2 = self.BN(torch.cat(split[1:], dim=1).contiguous())\n out = torch.cat((out1, out2), 1)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, with_ibn=False, gcb=None, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.with_gcb = gcb is not None\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n\n if with_ibn:\n self.bn1 = IBN(planes)\n else:\n self.bn1 = nn.BatchNorm2d(planes)\n\n self.conv2 = nn.Conv2d(planes, 
planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n # GCNet\n if self.with_gcb:\n gcb_inplanes = planes * self.expansion\n self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.with_gcb:\n out = self.context_block(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, last_stride, with_ibn, gcb, stage_with_gcb, block, layers):\n scale = 64\n self.inplanes = scale\n super().__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, scale, layers[0], with_ibn=with_ibn,\n gcb=gcb if stage_with_gcb[0] else None)\n self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2, with_ibn=with_ibn,\n gcb=gcb if stage_with_gcb[1] else None)\n self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2, with_ibn=with_ibn,\n gcb=gcb if stage_with_gcb[2] else None)\n self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=last_stride,\n gcb=gcb if stage_with_gcb[3] else None)\n\n def _make_layer(self, block, planes, blocks, stride=1, with_ibn=False, gcb=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n 
kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n if planes == 512:\n with_ibn = False\n layers.append(block(self.inplanes, planes, with_ibn, gcb, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, with_ibn, gcb))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n def load_pretrain(self, model_path=''):\n with_model_path = (model_path is not '')\n if not with_model_path: # resnet pretrain\n state_dict = model_zoo.load_url(model_urls[self._model_name])\n state_dict.pop('fc.weight')\n state_dict.pop('fc.bias')\n self.load_state_dict(state_dict)\n else:\n # ibn pretrain\n state_dict = torch.load(model_path)['state_dict']\n state_dict.pop('module.fc.weight')\n state_dict.pop('module.fc.bias')\n new_state_dict = {}\n for k in state_dict:\n new_k = '.'.join(k.split('.')[1:]) # remove module in name\n if self.state_dict()[new_k].shape == state_dict[k].shape:\n new_state_dict[new_k] = state_dict[k]\n state_dict = new_state_dict\n self.load_state_dict(state_dict, strict=False)\n\n def random_init(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n @classmethod\n def from_name(cls, model_name, last_stride, with_ibn, gcb, stage_with_gcb):\n cls._model_name = model_name\n return ResNet(last_stride, with_ibn, gcb, stage_with_gcb, block=Bottleneck, layers=model_layers[model_name])\n\n\n\nclass Baseline(nn.Module):\n in_planes = 2048\n\n def __init__(self,\n backbone,\n num_classes,\n last_stride,\n with_ibn,\n gcb,\n stage_with_gcb,\n pretrain=True,\n model_path=''):\n super().__init__()\n try:\n self.base = ResNet.from_name(backbone, last_stride, with_ibn, gcb, stage_with_gcb)\n except:\n print(f'not support {backbone} backbone')\n\n if pretrain:\n self.base.load_pretrain(model_path)\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.num_classes = num_classes\n\n self.bottleneck = nn.BatchNorm1d(self.in_planes)\n self.bottleneck.bias.requires_grad_(False) # no shift\n\n self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)\n\n\n def forward(self, x, label=None):\n base = self.base(x)\n global_feat = self.gap(base) # (b, 2048, 1, 1)\n global_feat = global_feat.view(-1, global_feat.size()[1])\n feat = self.bottleneck(global_feat) # normalize for angular softmax\n return feat, torch.sum(base*feat.unsqueeze(-1).unsqueeze(-1), dim=1)\n\n def load_params_wo_fc(self, state_dict):\n # new_state_dict = {}\n # for k, v in state_dict.items():\n # k = '.'.join(k.split('.')[1:])\n # new_state_dict[k] = v\n # state_dict = new_state_dict\n state_dict.pop('classifier.weight')\n res = self.load_state_dict(state_dict, strict=False)\n assert str(res.missing_keys) == str(['classifier.weight',]), 'issue loading pretrained weights'\nif __name__ == \"__main__\":\n model = Baseline(\n 'resnet50',\n 1453,\n 1,\n True,\n \"ratio\",\n (False, False, False, False),\n pretrain = False,\n model_path = '')\n print(model)"
] | [
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.InstanceNorm2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.split",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MaximeSorgenfrei/cat_dog_cnn | [
"bc1301fb683de2111db2c25b9da22608ede8e070"
] | [
"webcam_animal_classifier.py"
] | [
"import cv2\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.callbacks import EarlyStopping\nfrom keras.optimizers import Adam\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nfrom random import shuffle\n# import tensorflow as tf\nimport time\n\nfile_list = os.listdir(\"./\")\nkeras_model_files = []\nfor file in file_list:\n if file.split(\".\")[-1] in [\"h5\",\"npy\"]:\n print(file)\n keras_model_files.append(file)\n\n# load model from file\nkeras_model_file_i_want_to_use = keras_model_files[0]\nmodel = keras.models.load_model(keras_model_file_i_want_to_use)\nmodel.summary()\n# classes = [\"ape\", \"bear\", \"bee\", \"beetle\", \"bird\", \"bos\", \"canine\", \"deer\", \"elephants\", \"feline\", \"frogs\", \"gekko\", \"golden moles\", \"hare\", \"human\", \"lemur\", \"loris\", \"none\", \"rodent\", \"salamander\", \"scorpions\", \"shark\", \"sheep\", \"snake\", \"spider\", \"squirrel\", \"turtle\", \"whale\"]\n# read directories, resize and label data\n# Write some Text\n\n# dict\nwith open(keras_model_files[1],\"r\") as f:\n class_list = json.load(f)\n class_stats = pd.DataFrame(data={\"classes\":class_list})\n classes = class_stats[\"classes\"].to_dict()\nf.close()\nprint(\"Classes: {}\".format(classes))\nprint(\"Using following model file for predictions:\\n{}\".format(keras_model_file_i_want_to_use))\n\nfont = cv2.FONT_HERSHEY_COMPLEX\nbottomLeftCornerOfText = (50,50)\nbottomLeftCornerOfText2 = (50,75)\nfontScale = 0.5\nfontColor = (255,255,255)\nlineType = 2\n\nwidth, height = 50, 50\ncap_width = 1280\ncap_height = 720\nroi_width = 400\nroi_height = 300\n\nWebCam_cap = cv2.VideoCapture(0)\nWebCam_cap.set(cv2.CAP_PROP_FRAME_WIDTH, cap_width)\nWebCam_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cap_height)\n\nSETTING_PHOTOFRAME = True\n\nwhile True:\n # get frame\n ret, frame = WebCam_cap.read()\n # print(type(frame), frame.shape)\n try:\n # reduce frame to 50x50 pixles\n # image = cv2.imread(frame, 
cv2.IMREAD_GRAYSCALE)\n\n if SETTING_PHOTOFRAME:\n roi = np.ones_like(frame)\n roi[int((cap_height-roi_height)/2):-int((cap_height-roi_height)/2), int((cap_width-roi_width)/2):-int((cap_width-roi_width)/2), :] = frame[int((cap_height-roi_height)/2):-int((cap_height-roi_height)/2), int((cap_width-roi_width)/2):-int((cap_width-roi_width)/2), :]\n image = frame[int((cap_height-roi_height)/2):-int((cap_height-roi_height)/2), int((cap_width-roi_width)/2):-int((cap_width-roi_width)/2), :]\n # print(\"image shape: \",image.shape)\n else:\n image = frame\n # resize, turn to gray and reshape for CNN\n image = cv2.resize(image, (height, width), interpolation=cv2.INTER_AREA)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image_to_predict = np.reshape(image, (1, height, width, 1))\n # predict with NN\n pred = model.predict_classes(image_to_predict, verbose=0)\n pred_ = model.predict(image_to_predict, verbose=0)\n prediction = \"{}: {} | {}: {}\".format(classes[0], pred_[0][0], classes[1], pred_[0][1])\n if pred_[0][pred[0]] > 0.30:\n prediction_class = \"Predicted class: {} [{:.2f}]\".format(classes[pred[0]], pred_[0][pred[0]])\n else:\n prediction_class = \"No significant prediction possible!\"\n # print prediction and class to frame\n # cv2.putText(frame, prediction, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)\n if SETTING_PHOTOFRAME:\n cv2.putText(roi, prediction_class, bottomLeftCornerOfText2, font, fontScale, fontColor, lineType)\n else:\n cv2.putText(frame, prediction_class, bottomLeftCornerOfText2, font, fontScale, fontColor, lineType)\n # ax[i].set_title(\"{}: {}-{} ({})\".format(i, pred, classes[pred[0]], np.round(pred_, decimals=4)))\n # display resut\n# cv2.namedWindow(\"Result\", cv2.WINDOW_AUTOSIZE)\n# cv2.imshow(\"Result\", image)\n except Exception as e:\n print(e)\n else:\n cv2.namedWindow(\"WebCam\", cv2.WINDOW_AUTOSIZE)\n if SETTING_PHOTOFRAME:\n cv2.imshow(\"WebCam\", roi)\n else:\n cv2.imshow(\"WebCam\", frame)\n \n if cv2.waitKey(1) 
& 0xFF==ord(\"q\"):\n break\n \nWebCam_cap.release()\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.reshape",
"numpy.ones_like",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
534ttl3/ctsutils | [
"b070bf349d4a112df576404a3948e0de60f24927",
"b070bf349d4a112df576404a3948e0de60f24927"
] | [
"ctsutils/test.py",
"ctsutils/mpl_slider.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ctsutils.cparameterspace import CParam, CParameterSpace\n\n\ndef foo(X, Y, Y2):\n \"\"\" \"\"\"\n return (1 - X / 2 + X ** 5 + (Y + Y2 ) ** 3) * np.exp(-X ** 2 - (Y + Y2 ) ** 2) # calcul du tableau des valeurs de Z\n\n\ndef foo(X, Y, Y2, Y3):\n \"\"\" \"\"\"\n return (1 - X / 2 + X ** 5 + (Y + Y2 + Y3) ** 3) * np.exp(-X ** 2 - (Y + Y2 + Y3) ** 2) # calcul du tableau des valeurs de Z\n\n\nps = CParameterSpace([CParam(\"x\", np.linspace(-3, 3, 51), unit=\"m\"),\n CParam(\"y\", np.linspace(-2, 2, 41)),\n CParam(\"y2\", np.linspace(-1, 1, 31)),\n CParam(\"y3\", np.linspace(-1, 1, 10))])\n\n# import pdb; pdb.set_trace() # noqa BREAKPOINT\n# x = ps.get_arr(\"x\")\n\nZ = ps.calc_function(foo, args_param_names=(\"x\", \"y\", \"y2\", \"y3\"))\n\nintegrals = ps.calc_integral(Z, \"x\")\n# import pdb; pdb.set_trace() # noqa BREAKPOINT\n\n# fig, ax = plt.subplots(1, 1)\n# ps.plot(Z, ordering_of_params_names=(\"y2\", \"y\"), ax=ax)\n# plt.show()\n\n# import pdb; pdb.set_trace() # noqa BREAKPOINT\n\nfig, ax = plt.subplots(1, 1)\n\n#ps.plot(Z, z_label=\"Z\", ordering_of_params_name_and_value=((\"y3\", None), (\"y2\", None)), ax=ax)\nps.plot(integrals, z_label=\"integrals\", ordering_of_params_name_and_value=((\"y3\", None), (\"y2\", None)), ax=ax)\n\n# ps.plot(integrals, z_label=\"integrals\", ordering_of_params_name_and_value=((\"y2\", None), (\"y\", None)), ax=ax)\n\nplt.show()\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\nt = np.arange(0.0, 1.0, 0.001)\na0 = 5\nf0 = 3\ndelta_f = 5.0\ns = a0 * np.sin(2 * np.pi * f0 * t)\nl, = plt.plot(t, s, lw=2)\nax.margins(x=0)\n\naxcolor = 'lightgoldenrodyellow'\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\naxamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n\nsfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0, valstep=delta_f)\nsamp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)\n\n\ndef update(val):\n print(val)\n amp = samp.val\n freq = sfreq.val\n l.set_ydata(amp*np.sin(2*np.pi*freq*t))\n fig.canvas.draw_idle()\n\n\nsfreq.on_changed(update)\nsamp.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\ndef reset(event):\n sfreq.reset()\n samp.reset()\nbutton.on_clicked(reset)\n\nrax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)\nradio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)\n\n\ndef colorfunc(label):\n l.set_color(label)\n fig.canvas.draw_idle()\nradio.on_clicked(colorfunc)\n\nplt.show()\n"
] | [
[
"numpy.exp",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.linspace"
],
[
"matplotlib.widgets.Button",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.widgets.RadioButtons"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sereini/SpeechSeparationModel | [
"ea44c845762112f3bc2e5e54c5530e6fd429464f"
] | [
"preprocessing/embedding/export_FaceEmbedding.py"
] | [
"\"\"\"\nExports the embeddings of a directory of images as numpy arrays.\nFollowing structure:\n D:\\images:\n folder1:\n img_0\n ...\n img_74\n folder2:\n img_0\n ...\n img_74\n \nOutput:\nembeddings.npy -- Embeddings as np array (with names \"folder1\", \"folder2\", etc.)\n\nUse --is_aligned False, if your images aren't already pre-aligned\nUse --image_batch to dictacte how many images to load in memory at a time.\n\n\nStarted with export_embeddings.py from Charles Jekel, and modified the program\nto export the face embeddings for the audio-visual speech separation model. The\npretrained model is from David Sandberg's facenet repository:\n https://github.com/davidsandberg/facenet\nexport_embedding.py from same project:\n https://github.com/davidsandberg/facenet/tree/master/contributed\n\n\nEnsure you have set the PYTHONPATH for the pretrained facenet (3.):\n https://github.com/davidsandberg/facenet/wiki/Validate-on-LFW\nExecution:\n python export_FaceEmbedding.py models\\20180402-114759\\20180402-114759.pb D:\\images --is_aligned False --image_size 160 --gpu_memory_fraction 0.5 --image_batch 75\n\n\nSereina Scherrer 2019\n\"\"\"\n\n# MIT License\n#\n# Copyright (c) 2016 David Sandberg\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND 
NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nfrom scipy import misc\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nimport argparse\nimport facenet\nimport align.detect_face\nimport re\nimport glob\n\nfrom six.moves import xrange\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\ndef natural_keys(text):\n return [atoi(c) for c in re.split(r'(\\d+)', text)]\n\ndef main(args):\n train_set = facenet.get_dataset(args.data_dir)\n image_list, label_list = facenet.get_image_paths_and_labels(train_set)\n \n # sort the image:s img_0 ... img_74\n image_list.sort(key=natural_keys)\n \n # fetch the classes (labels as strings) exactly as it's done in get_dataset\n path_exp = os.path.expanduser(args.data_dir)\n classes = [path for path in os.listdir(path_exp) \\\n if os.path.isdir(os.path.join(path_exp, path))]\n classes.sort()\n # get the label strings\n label_strings = [name for name in classes if \\\n os.path.isdir(os.path.join(path_exp, name))]\n\n # define path to save the embeddings\n dirs = [\"./emb/embeddings_AVspeech/\"]\n for d in dirs:\n if not os.path.exists(d):\n os.makedirs(d)\n print(\"Folder created:\", d)\n \n with tf.Graph().as_default():\n\n with tf.Session() as sess:\n\n # Load the model\n facenet.load_model(args.model_dir)\n\n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n # Run forward pass to calculate embeddings\n nrof_images 
= len(image_list)\n print('Number of images: ', nrof_images)\n batch_size = args.image_batch\n if nrof_images % batch_size == 0:\n nrof_batches = nrof_images // batch_size\n else:\n nrof_batches = (nrof_images // batch_size) + 1\n print('Number of batches: ', nrof_batches)\n embedding_size = embeddings.get_shape()[1]\n emb_array = np.zeros((nrof_images, embedding_size))\n start_time = time.time()\n\n for i in range(nrof_batches):\n if i == nrof_batches -1:\n n = nrof_images\n else:\n n = i*batch_size + batch_size\n # Get images for the batch\n if args.is_aligned is True:\n images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)\n else:\n images = load_and_align_data(image_list[i*batch_size:n], args.image_size, args.margin, args.gpu_memory_fraction)\n feed_dict = { images_placeholder: images, phase_train_placeholder:False }\n # Use the facenet model to calcualte embeddings\n embed = sess.run(embeddings, feed_dict=feed_dict)\n emb_array[i*batch_size:n, :] = embed\n \n # export the embedding\n s = dirs[0] + label_strings[i] + \".npy\" \n np.save(s, embed)\n \n print('Completed batch', i+1, 'of', nrof_batches)\n\n run_time = time.time() - start_time\n print('Run time: ', run_time)\n print('Time per video: ',run_time/nrof_batches)\n\n\n\ndef load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):\n\n\n print('Creating networks and loading parameters')\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n\n nrof_samples = len(image_paths)\n img_list = [None] * nrof_samples\n for i in xrange(nrof_samples):\n print(image_paths[i])\n img = misc.imread(os.path.expanduser(image_paths[i]))\n \n aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')\n prewhitened = 
facenet.prewhiten(aligned)\n img_list[i] = prewhitened \n \n # uncomment if you want to save the aligned images\n '''f = os.path.basename(image_paths[i])\n #print(f)\n tmp_folder = re.split(r'\\\\', image_paths[i])\n tmp_f = tmp_folder[-2]\n d = \"./aligned/\" + tmp_f + \"/\"\n if not os.path.exists(d):\n os.makedirs(d)\n print(\"Folder created:\", d)\n \n misc.imsave(d + f, aligned)'''\n \n images = np.stack(img_list)\n return images\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('model_dir', type=str,\n help='Directory containing the meta_file and ckpt_file')\n parser.add_argument('data_dir', type=str,\n help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.')\n parser.add_argument('--is_aligned', type=str,\n help='Is the data directory already aligned and cropped?', default=True)\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=160)\n parser.add_argument('--margin', type=int,\n help='Margin for the crop around the bounding box (height, width) in pixels.',\n default=44)\n parser.add_argument('--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.',\n default=1.0)\n parser.add_argument('--image_batch', type=int,\n help='Number of images stored in memory at a time. Default 75.',\n default=75)\n\n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n"
] | [
[
"scipy.misc.imresize",
"tensorflow.Graph",
"numpy.stack",
"numpy.save",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.Session",
"tensorflow.get_default_graph",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
yoshitomo-matsubara/vision | [
"03d11338f3faf94a0749549912593ddb8b70be17",
"03d11338f3faf94a0749549912593ddb8b70be17"
] | [
"references/detection/utils.py",
"torchvision/models/googlenet.py"
] | [
"import datetime\nimport errno\nimport os\nimport time\nfrom collections import defaultdict, deque\n\nimport torch\nimport torch.distributed as dist\n\n\nclass SmoothedValue:\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device=\"cuda\")\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value\n )\n\n\ndef all_gather(data):\n \"\"\"\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n \"\"\"\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n data_list = [None] * world_size\n dist.all_gather_object(data_list, data)\n return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n \"\"\"\n Args:\n input_dict (dict): all the values will be reduced\n 
average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that all processes\n have the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return input_dict\n with torch.inference_mode():\n names = []\n values = []\n # sort the keys so that they are consistent across processes\n for k in sorted(input_dict.keys()):\n names.append(k)\n values.append(input_dict[k])\n values = torch.stack(values, dim=0)\n dist.all_reduce(values)\n if average:\n values /= world_size\n reduced_dict = {k: v for k, v in zip(names, values)}\n return reduced_dict\n\n\nclass MetricLogger:\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{attr}'\")\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(f\"{name}: {str(meter)}\")\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = \"\"\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt=\"{avg:.4f}\")\n data_time = SmoothedValue(fmt=\"{avg:.4f}\")\n space_fmt = \":\" + str(len(str(len(iterable)))) + \"d\"\n if torch.cuda.is_available():\n log_msg = self.delimiter.join(\n [\n header,\n \"[{0\" + space_fmt + 
\"}/{1}]\",\n \"eta: {eta}\",\n \"{meters}\",\n \"time: {time}\",\n \"data: {data}\",\n \"max mem: {memory:.0f}\",\n ]\n )\n else:\n log_msg = self.delimiter.join(\n [header, \"[{0\" + space_fmt + \"}/{1}]\", \"eta: {eta}\", \"{meters}\", \"time: {time}\", \"data: {data}\"]\n )\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n print(\n log_msg.format(\n i,\n len(iterable),\n eta=eta_string,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB,\n )\n )\n else:\n print(\n log_msg.format(\n i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)\n )\n )\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print(f\"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)\")\n\n\ndef collate_fn(batch):\n return tuple(zip(*batch))\n\n\ndef mkdir(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop(\"force\", False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not 
is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n if \"RANK\" in os.environ and \"WORLD_SIZE\" in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n args.gpu = int(os.environ[\"LOCAL_RANK\"])\n elif \"SLURM_PROCID\" in os.environ:\n args.rank = int(os.environ[\"SLURM_PROCID\"])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print(\"Not using distributed mode\")\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = \"nccl\"\n print(f\"| distributed init (rank {args.rank}): {args.dist_url}\", flush=True)\n torch.distributed.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank\n )\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)\n",
"import warnings\nfrom collections import namedtuple\nfrom typing import Optional, Tuple, List, Callable, Any\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom .._internally_replaced_utils import load_state_dict_from_url\nfrom ..utils import _log_api_usage_once\n\n__all__ = [\"GoogLeNet\", \"googlenet\", \"GoogLeNetOutputs\", \"_GoogLeNetOutputs\"]\n\nmodel_urls = {\n # GoogLeNet ported from TensorFlow\n \"googlenet\": \"https://download.pytorch.org/models/googlenet-1378be20.pth\",\n}\n\nGoogLeNetOutputs = namedtuple(\"GoogLeNetOutputs\", [\"logits\", \"aux_logits2\", \"aux_logits1\"])\nGoogLeNetOutputs.__annotations__ = {\"logits\": Tensor, \"aux_logits2\": Optional[Tensor], \"aux_logits1\": Optional[Tensor]}\n\n# Script annotations failed with _GoogleNetOutputs = namedtuple ...\n# _GoogLeNetOutputs set here for backwards compat\n_GoogLeNetOutputs = GoogLeNetOutputs\n\n\nclass GoogLeNet(nn.Module):\n __constants__ = [\"aux_logits\", \"transform_input\"]\n\n def __init__(\n self,\n num_classes: int = 1000,\n aux_logits: bool = True,\n transform_input: bool = False,\n init_weights: Optional[bool] = None,\n blocks: Optional[List[Callable[..., nn.Module]]] = None,\n dropout: float = 0.2,\n dropout_aux: float = 0.7,\n ) -> None:\n super().__init__()\n _log_api_usage_once(self)\n if blocks is None:\n blocks = [BasicConv2d, Inception, InceptionAux]\n if init_weights is None:\n warnings.warn(\n \"The default weight initialization of GoogleNet will be changed in future releases of \"\n \"torchvision. 
If you wish to keep the old behavior (which leads to long initialization times\"\n \" due to scipy/scipy#11299), please set init_weights=True.\",\n FutureWarning,\n )\n init_weights = True\n assert len(blocks) == 3\n conv_block = blocks[0]\n inception_block = blocks[1]\n inception_aux_block = blocks[2]\n\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n\n self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)\n self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n self.conv2 = conv_block(64, 64, kernel_size=1)\n self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)\n self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)\n self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)\n self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)\n self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)\n self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)\n self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)\n self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)\n self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)\n self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)\n\n if aux_logits:\n self.aux1 = inception_aux_block(512, num_classes, dropout=dropout_aux)\n self.aux2 = inception_aux_block(528, num_classes, dropout=dropout_aux)\n else:\n self.aux1 = None # type: ignore[assignment]\n self.aux2 = None # type: ignore[assignment]\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.dropout = nn.Dropout(p=dropout)\n self.fc = nn.Linear(1024, num_classes)\n\n if init_weights:\n self._initialize_weights()\n\n def _initialize_weights(self) -> None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, 
nn.Linear):\n torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=0.01, a=-2, b=2)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _transform_input(self, x: Tensor) -> Tensor:\n if self.transform_input:\n x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n x = torch.cat((x_ch0, x_ch1, x_ch2), 1)\n return x\n\n def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:\n # N x 3 x 224 x 224\n x = self.conv1(x)\n # N x 64 x 112 x 112\n x = self.maxpool1(x)\n # N x 64 x 56 x 56\n x = self.conv2(x)\n # N x 64 x 56 x 56\n x = self.conv3(x)\n # N x 192 x 56 x 56\n x = self.maxpool2(x)\n\n # N x 192 x 28 x 28\n x = self.inception3a(x)\n # N x 256 x 28 x 28\n x = self.inception3b(x)\n # N x 480 x 28 x 28\n x = self.maxpool3(x)\n # N x 480 x 14 x 14\n x = self.inception4a(x)\n # N x 512 x 14 x 14\n aux1: Optional[Tensor] = None\n if self.aux1 is not None:\n if self.training:\n aux1 = self.aux1(x)\n\n x = self.inception4b(x)\n # N x 512 x 14 x 14\n x = self.inception4c(x)\n # N x 512 x 14 x 14\n x = self.inception4d(x)\n # N x 528 x 14 x 14\n aux2: Optional[Tensor] = None\n if self.aux2 is not None:\n if self.training:\n aux2 = self.aux2(x)\n\n x = self.inception4e(x)\n # N x 832 x 14 x 14\n x = self.maxpool4(x)\n # N x 832 x 7 x 7\n x = self.inception5a(x)\n # N x 832 x 7 x 7\n x = self.inception5b(x)\n # N x 1024 x 7 x 7\n\n x = self.avgpool(x)\n # N x 1024 x 1 x 1\n x = torch.flatten(x, 1)\n # N x 1024\n x = self.dropout(x)\n x = self.fc(x)\n # N x 1000 (num_classes)\n return x, aux2, aux1\n\n @torch.jit.unused\n def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs:\n if self.training and self.aux_logits:\n return _GoogLeNetOutputs(x, aux2, aux1)\n else:\n return x # type: 
ignore[return-value]\n\n def forward(self, x: Tensor) -> GoogLeNetOutputs:\n x = self._transform_input(x)\n x, aux1, aux2 = self._forward(x)\n aux_defined = self.training and self.aux_logits\n if torch.jit.is_scripting():\n if not aux_defined:\n warnings.warn(\"Scripted GoogleNet always returns GoogleNetOutputs Tuple\")\n return GoogLeNetOutputs(x, aux2, aux1)\n else:\n return self.eager_outputs(x, aux2, aux1)\n\n\nclass Inception(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ch1x1: int,\n ch3x3red: int,\n ch3x3: int,\n ch5x5red: int,\n ch5x5: int,\n pool_proj: int,\n conv_block: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n super().__init__()\n if conv_block is None:\n conv_block = BasicConv2d\n self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)\n\n self.branch2 = nn.Sequential(\n conv_block(in_channels, ch3x3red, kernel_size=1), conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)\n )\n\n self.branch3 = nn.Sequential(\n conv_block(in_channels, ch5x5red, kernel_size=1),\n # Here, kernel_size=3 instead of kernel_size=5 is a known bug.\n # Please see https://github.com/pytorch/vision/issues/906 for details.\n conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1),\n )\n\n self.branch4 = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),\n conv_block(in_channels, pool_proj, kernel_size=1),\n )\n\n def _forward(self, x: Tensor) -> List[Tensor]:\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n outputs = [branch1, branch2, branch3, branch4]\n return outputs\n\n def forward(self, x: Tensor) -> Tensor:\n outputs = self._forward(x)\n return torch.cat(outputs, 1)\n\n\nclass InceptionAux(nn.Module):\n def __init__(\n self,\n in_channels: int,\n num_classes: int,\n conv_block: Optional[Callable[..., nn.Module]] = None,\n dropout: float = 0.7,\n ) -> None:\n super().__init__()\n if conv_block is None:\n conv_block = BasicConv2d\n self.conv = 
conv_block(in_channels, 128, kernel_size=1)\n\n self.fc1 = nn.Linear(2048, 1024)\n self.fc2 = nn.Linear(1024, num_classes)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, x: Tensor) -> Tensor:\n # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14\n x = F.adaptive_avg_pool2d(x, (4, 4))\n # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4\n x = self.conv(x)\n # N x 128 x 4 x 4\n x = torch.flatten(x, 1)\n # N x 2048\n x = F.relu(self.fc1(x), inplace=True)\n # N x 1024\n x = self.dropout(x)\n # N x 1024\n x = self.fc2(x)\n # N x 1000 (num_classes)\n\n return x\n\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:\n super().__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\n\ndef googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> GoogLeNet:\n r\"\"\"GoogLeNet (Inception v1) model architecture from\n `\"Going Deeper with Convolutions\" <http://arxiv.org/abs/1409.4842>`_.\n The required minimum input size of the model is 15x15.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n aux_logits (bool): If True, adds two auxiliary branches that can improve training.\n Default: *False* when pretrained is True otherwise *True*\n transform_input (bool): If True, preprocesses the input according to the method with which it\n was trained on ImageNet. 
Default: True if ``pretrained=True``, else False.\n \"\"\"\n if pretrained:\n if \"transform_input\" not in kwargs:\n kwargs[\"transform_input\"] = True\n if \"aux_logits\" not in kwargs:\n kwargs[\"aux_logits\"] = False\n if kwargs[\"aux_logits\"]:\n warnings.warn(\n \"auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them\"\n )\n original_aux_logits = kwargs[\"aux_logits\"]\n kwargs[\"aux_logits\"] = True\n kwargs[\"init_weights\"] = False\n model = GoogLeNet(**kwargs)\n state_dict = load_state_dict_from_url(model_urls[\"googlenet\"], progress=progress)\n model.load_state_dict(state_dict)\n if not original_aux_logits:\n model.aux_logits = False\n model.aux1 = None # type: ignore[assignment]\n model.aux2 = None # type: ignore[assignment]\n return model\n\n return GoogLeNet(**kwargs)\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.cuda.device_count",
"torch.distributed.all_gather_object",
"torch.distributed.is_initialized",
"torch.inference_mode",
"torch.distributed.barrier",
"torch.tensor",
"torch.cuda.max_memory_allocated",
"torch.distributed.is_available",
"torch.cuda.is_available",
"torch.stack",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.all_reduce",
"torch.save"
],
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.trunc_normal_",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.unsqueeze",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.jit.is_scripting",
"torch.nn.BatchNorm2d",
"torch.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UBC-MDS/world-energy-visualization | [
"6f8dd756a8c158a332fd94ca1f025fc16bcd77b3"
] | [
"src/tab1_mapview.py"
] | [
"from dash import Input, Output, callback, html, dcc, State\nimport dash_bootstrap_components as dbc\n\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport plotly.io as pio\nimport plotly.graph_objects as go\n\nfrom urllib.request import urlopen\nimport json\n\ndf_all = pd.read_csv(\n \"data/Primary-energy-consumption-from-fossilfuels-nuclear-renewables.csv\"\n)\ndf_notna_wide = df_all[df_all[\"Code\"].notna()]\ndf_notna = df_notna_wide.melt(\n id_vars=[\"Entity\", \"Code\", \"Year\"],\n value_vars=[\"Fossil\", \"Renewables\", \"Nuclear\"],\n var_name=\"energy_type\",\n value_name=\"percentage\",\n).merge(df_notna_wide, on=[\"Year\", \"Code\", \"Entity\"])\n\ndf_countries = df_notna[df_notna[\"Code\"] != \"OWID_WRL\"]\ndf_world = df_notna[df_notna[\"Code\"] == \"OWID_WRL\"]\ndf_continents = df_all[df_all[\"Code\"].isna()]\n\nlist_of_continents = df_continents[\"Entity\"].unique()\nlist_of_countries = df_countries[\"Entity\"].unique()\nlist_yrs = df_all[\"Year\"].unique()\n\nproj_param = {\n \"World\": [0, 0, 1],\n \"North America\": [40, -120, 2],\n \"Europe\": [50, 20, 4],\n \"Africa\": [0, 20, 2],\n}\n\n# ==============================================================================\n# Layout for map and barchart\n# ==============================================================================\n\ntab1_plots = dbc.Col(\n [\n dbc.Row(\n [\n html.H4(\"World Consumption by Country\", style={\"width\": \"fit-content\"}),\n dbc.Col(\n [\n dbc.Button(\n id=\"map_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Drag and select the number of year to view the change of engergy consumption distribution using the slide bar. 
You can hover or zoom to get the details of a specific region.\",\n target=\"map_tooltip\",\n placement=\"bottom\",\n ),\n ]\n ),\n ],\n style={\"padding\": \"3vh 0\"},\n ),\n dcc.Graph(id=\"tab1-map\"),\n html.Div(\n dcc.Slider(\n id=\"tab1-year-slider\",\n min=list_yrs.min(),\n max=list_yrs.max(),\n step=1,\n value=list_yrs.max(),\n marks={\n int(i): str(i) for i in np.append(list_yrs[::5], [list_yrs.max()])\n },\n tooltip={\"placement\": \"top\", \"always_visible\": True},\n updatemode=\"drag\",\n ),\n style={\"padding\": \"0vh 10vw\"},\n ),\n html.Br(),\n dbc.Row(\n [\n html.H4(\n \"Top/Bottom energy consumer nations\", style={\"width\": \"fit-content\"}\n ),\n dbc.Col(\n [\n dbc.Button(\n id=\"bar_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Select the number of countries to view in the bar plot using the input tab,\"\n \"then select whether to view to the top or bottom consumers.\"\n \"Hover the bar for details.\",\n target=\"bar_tooltip\",\n placement=\"bottom\",\n ),\n ],\n style={\"padding\": \"0 0\"},\n ),\n ]\n ),\n html.Br(),\n dbc.Row(\n [\n dbc.Col(\n [\n dbc.Row(\n [\n html.H4(\n \"Number of countries\",\n style={\"font-size\": \"20px\", \"width\": \"fit-content\"},\n ),\n dbc.Col(\n [\n dbc.Button(\n id=\"topN_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Controls the number of countries to view in the barchart. 
Select upto 15 countries\",\n target=\"topN_tooltip\",\n placement=\"bottom\",\n ),\n ],\n style={\"padding\": \"0 0\"},\n ),\n ]\n ),\n html.Br(),\n dbc.Input(\n id=\"tab1-input-topN\",\n value=10,\n type=\"number\",\n debounce=True,\n required=True,\n minlength=1,\n max=15,\n min=0,\n ),\n ]\n ),\n dbc.Col(\n [\n dbc.Row(\n [\n html.H4(\n \"Ranking type\",\n style={\"font-size\": \"20px\", \"width\": \"fit-content\"},\n ),\n dbc.Col(\n [\n dbc.Button(\n id=\"top_bot_tooltip\",\n color=\"secondary\",\n children=\"?\",\n size=\"sm\",\n outline=True,\n ),\n dbc.Tooltip(\n \"Select whether you want to view the top or bottom consumers\",\n target=\"top_bot_tooltip\",\n placement=\"bottom\",\n ),\n ],\n style={\"padding\": \"0 0\"},\n ),\n ]\n ),\n html.Br(),\n dcc.RadioItems(\n [\"Top\", \"Bottom\"],\n value=\"Top\",\n id=\"tab1_top_bot\",\n inline=True,\n labelStyle={\n \"margin-right\": \"10px\",\n \"margin-top\": \"1px\",\n \"display\": \"inline-block\",\n \"horizontal-align\": \"\",\n },\n ),\n ],\n style={\n \"padding\": \"0 0\",\n },\n ),\n ]\n ),\n html.Br(),\n dcc.Graph(id=\"tab1-barchart\"),\n ]\n)\n\n\n# ==============================================================================\n# World Map\n# ==============================================================================\n\n\n@callback(\n Output(\"tab1-map\", \"figure\"),\n Input(\"tab1-energy-type-dropdown\", \"value\"),\n Input(\"tab1-year-slider\", \"value\"),\n Input(\"tab1-map-focus\", \"value\"),\n)\ndef display_map(energy_type, year, scope):\n \"\"\"\n Docs\n \"\"\"\n # scope = \"Africa\"\n df = df_notna.query(\"Year==@year & energy_type==@energy_type\")\n\n fig = px.choropleth(\n df,\n locations=\"Code\",\n color=\"percentage\",\n hover_name=\"Entity\",\n hover_data={\n \"Year\": True,\n \"Fossil\": True,\n \"Nuclear\": True,\n \"Renewables\": True,\n \"percentage\": False,\n \"Code\": False,\n },\n color_continuous_scale=px.colors.sequential.YlGn,\n range_color=[0, 100],\n )\n\n 
fig.update_layout(\n dragmode=\"zoom\",\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0},\n title={\n \"text\": \"Global \"\n + str(energy_type)\n + \" Energy Consumption in \"\n + str(year),\n \"x\": 0.5,\n \"xanchor\": \"center\",\n },\n )\n\n fig.update_geos(\n showcountries=True,\n center={\"lat\": proj_param[scope][0], \"lon\": proj_param[scope][1]},\n projection={\"scale\": proj_param[scope][2]},\n )\n\n return fig\n\n\n# ==============================================================================\n# Top N countries barchart\n# ==============================================================================\n\n\n@callback(\n Output(\"tab1-barchart\", \"figure\"),\n Input(\"tab1-energy-type-dropdown\", \"value\"),\n Input(\"tab1-year-slider\", \"value\"),\n Input(\"tab1-input-topN\", \"value\"),\n Input(\"tab1_top_bot\", \"value\"),\n)\ndef display_barchart(energy_type, year, topN, top_bot):\n \"\"\"\n Docs\n \"\"\"\n\n if top_bot == \"Top\":\n df_sorted = df_countries.query(\n \"Year==@year & energy_type==@energy_type\"\n ).sort_values([\"percentage\"], ascending=False)[:topN]\n\n elif top_bot == \"Bottom\":\n df_sorted = df_countries.query(\n \"Year==@year & energy_type==@energy_type\"\n ).sort_values([\"percentage\"], ascending=False)[-topN:]\n\n fig_bar = px.bar(\n df_sorted,\n x=\"percentage\",\n y=\"Entity\",\n color=\"percentage\",\n # title=\"Bar Graph\",\n hover_name=\"Entity\",\n hover_data={\n \"Year\": True,\n \"Fossil\": True,\n \"Nuclear\": True,\n \"Renewables\": True,\n \"percentage\": False,\n \"Entity\": False,\n },\n range_color=[0, 100],\n color_continuous_scale=px.colors.sequential.YlGn,\n range_x=[0, 105],\n text_auto=True,\n )\n\n fig_bar.update_layout(\n xaxis_title=\"Percentage %\",\n yaxis_title=\"Country\",\n legend_title=\"%\",\n )\n fig_bar.update_coloraxes(showscale=False)\n fig_bar.update_traces(textposition=\"outside\")\n\n if top_bot == \"Top\":\n fig_bar.update_layout(\n yaxis={\"categoryorder\": \"total ascending\"},\n 
title={\n \"text\": \"Top \"\n + str(topN)\n + \" \"\n + str(energy_type)\n + \" Energy Consumers in \"\n + str(year),\n \"x\": 0.5,\n \"xanchor\": \"center\",\n },\n )\n\n elif top_bot == \"Bottom\":\n fig_bar.update_layout(\n # yaxis={\"categoryorder\": \"total descending\"},\n title={\n \"text\": \"Bottom \"\n + str(topN)\n + \" \"\n + str(energy_type)\n + \" Energy Consumers in \"\n + str(year),\n \"x\": 0.5,\n \"xanchor\": \"center\",\n },\n )\n\n return fig_bar\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Belvenix/IdleonCogOptimizer | [
"6b80b9f11bf0478e2e3522cb07b93b2c8834840b"
] | [
"src/python/board.py"
] | [
"from typing import Tuple\nfrom .cogs import Cog, EmptyCog, Player\nfrom .special_cogs import BoostedCog\nimport numpy as np\n\nclass Board:\n def __init__(self, height: int = 8, width: int = 12, locked: bool = True) -> None:\n self._visualization_board = ''\n self.board = np.array([[EmptyCog() for w in range(width)] for h in range(height)])\n if locked:\n self.mask = np.zeros_like(self.board)\n else:\n self.mask = np.ones_like(self.board)\n self.storage = []\n self.total_build = 0\n self.total_flaggy = 0\n self.total_exp = 0\n\n def unlock(self, mask: np.array):\n assert mask.shape == self.board.shape, \"Mask shape is different than board shape!\"\n self.mask = mask\n \n def empty(self) -> bool:\n for cog in self.board.flatten():\n if not isinstance(cog, EmptyCog):\n return False\n return True\n\n def place(self, x:int, y:int, cog: Cog = EmptyCog()) -> None:\n if self.validate(x, y):\n assert isinstance(cog, Cog), \"You can't place non-cogs on board!\"\n if not isinstance(self.board[y, x], EmptyCog):\n self.storage.append(self.board[y, x])\n self.board[y,x] = cog\n \n def clear(self):\n self.reset_board_values()\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n self.place(x, y, EmptyCog())\n\n def reset_board_values(self):\n self.total_build = 0\n self.total_flaggy = 0\n self.total_exp = 0\n\n def validate(self, x, y) -> bool:\n return (x >= 0 and y >= 0 and x < self.board.shape[1] and y < self.board.shape[0]) and (self.mask[y, x])\n\n def get_totals(self) -> Tuple[int, int, int]:\n return self.total_build, self.total_flaggy, self.total_exp\n\n def calculate_board(self):\n self.reset_loop()\n self.multiply_loop()\n self.sum_loop()\n\n def reset_loop(self):\n self.reset_board_values()\n for c in self.board.flatten():\n c.reset()\n\n def multiply_loop(self):\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n if self.validate(x, y):\n c = self.board[y, x]\n if isinstance(c, BoostedCog):\n boosted_coordinates, 
boosted_values = c.boosted()\n for bc in boosted_coordinates:\n dx, dy = bc[0], bc[1]\n \n if self.validate(x+dx, y+dy):\n boosted_cog = self.board[y+dy, x+dx]\n boosted_cog.apply_boost(*boosted_values)\n self.board[y+dy, x+dx] = boosted_cog\n \n def sum_loop(self):\n for x in range(self.board.shape[1]):\n for y in range(self.board.shape[0]):\n if self.validate(x, y):\n c = self.board[y, x]\n self.total_build +=c.get_values()[0]\n self.total_flaggy += c.get_values()[1]\n self.total_exp += c.get_values()[2]\n\n def show(self):\n self.print_rates()\n self.print_board()\n self.print_storage()\n self.print_players_info()\n\n def print_rates(self):\n print(\"Total build rate: \" + str(self.total_build) + '\\n' +\n \"Total flaggy rate: \" + str(self.total_flaggy) + '\\n' +\n \"Total extra exp: \" + str(self.total_exp))\n\n def print_board(self):\n board_print = ''\n for y in range(self.board.shape[0]):\n for x in range(self.board.shape[1]):\n board_print += str(self.board[y, x]) + '\\t'\n board_print = board_print[:-1] + '\\n'\n self._visualization_board = board_print\n print(self._visualization_board)\n \n def print_storage(self):\n storage_print = 'In storage: '\n for s in self.storage:\n storage_print += str(s) + ', '\n print(storage_print)\n\n def print_players_info(self):\n print('Player stats:')\n for c in self.board.flatten():\n if isinstance(c, Player):\n print(c.info())"
] | [
[
"numpy.ones_like",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anbhimi/Niffler | [
"81bf6c05132a58d05c7934f66edd0969c3bc9bf5"
] | [
"modules/png-extraction/ImageExtractor.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport glob \nfrom shutil import copyfile\nimport hashlib\nimport json\nimport sys\nimport subprocess\nimport logging\nfrom multiprocessing import Pool\nimport pdb\nimport time\nimport pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport pydicom as dicom \nimport png\n# pydicom imports needed to handle data errors\nfrom pydicom import config\nfrom pydicom import datadict\nfrom pydicom import values \n\nimport pathlib\nconfigs = {}\n\n\ndef initialize_config_and_execute(config_values):\n global configs\n configs = config_values\n # Applying checks for paths\n \n p1 = pathlib.PurePath(configs['DICOMHome'])\n dicom_home = p1.as_posix() # the folder containing your dicom files\n\n p2 = pathlib.PurePath(configs['OutputDirectory'])\n output_directory = p2.as_posix()\n\n print_images = bool(configs['PrintImages'])\n print_only_common_headers = bool(configs['CommonHeadersOnly'])\n depth = int(configs['Depth'])\n processes = int(configs['UseProcesses']) # how many processes to use.\n flattened_to_level = configs['FlattenedToLevel']\n email = configs['YourEmail']\n send_email = bool(configs['SendEmail'])\n no_splits = int(configs['SplitIntoChunks'])\n is16Bit = bool(configs['is16Bit']) \n \n metadata_col_freq_threshold = 0.1\n\n png_destination = output_directory + '/extracted-images/'\n failed = output_directory + '/failed-dicom/'\n maps_directory = output_directory + '/maps/'\n meta_directory = output_directory + '/meta/'\n\n LOG_FILENAME = output_directory + '/ImageExtractor.out'\n pickle_file = output_directory + '/ImageExtractor.pickle'\n\n # record the start time\n t_start = time.time()\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)\n\n if not os.path.exists(maps_directory):\n os.makedirs(maps_directory)\n\n if not os.path.exists(meta_directory):\n os.makedirs(meta_directory)\n\n if not 
os.path.exists(png_destination):\n os.makedirs(png_destination)\n\n if not os.path.exists(failed):\n os.makedirs(failed)\n\n if not os.path.exists(failed + \"/1\"):\n os.makedirs(failed + \"/1\")\n\n if not os.path.exists(failed + \"/2\"):\n os.makedirs(failed + \"/2\")\n\n if not os.path.exists(failed + \"/3\"):\n os.makedirs(failed + \"/3\")\n\n if not os.path.exists(failed + \"/4\"):\n os.makedirs(failed + \"/4\")\n\n logging.info(\"------- Values Initialization DONE -------\")\n final_res = execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start)\n return final_res\n\n\n# Function for getting tuple for field,val pairs\ndef get_tuples(plan, outlist = None, key = \"\"):\n if len(key)>0:\n key = key + \"_\"\n if not outlist:\n outlist = []\n for aa in plan.dir():\n try:\n hasattr(plan,aa)\n except TypeError as e:\n logging.warning('Type Error encountered')\n if hasattr(plan, aa) and aa!= 'PixelData':\n value = getattr(plan, aa)\n start = len(outlist)\n # if dicom sequence extract tags from each element\n if type(value) is dicom.sequence.Sequence:\n for nn, ss in enumerate(list(value)):\n newkey = \"_\".join([key,(\"%d\"%nn),aa]) if len(key) else \"_\".join([(\"%d\"%nn),aa])\n candidate = get_tuples(ss,outlist=None,key=newkey)\n # if extracted tuples are too big condense to a string\n if len(candidate)>2000:\n outlist.append((newkey,str(candidate)))\n else:\n outlist.extend(candidate)\n else:\n if type(value) is dicom.valuerep.DSfloat:\n value = float(value)\n elif type(value) is dicom.valuerep.IS:\n value = str(value)\n elif type(value) is dicom.valuerep.MultiValue:\n value = tuple(value)\n elif type(value) is dicom.uid.UID:\n value = str(value)\n outlist.append((key + aa, value))\n # appends name, value pair for this file. 
these are later concatenated to the dataframe\n return outlist\n\n\ndef extract_headers(f_list_elem):\n nn,ff = f_list_elem # unpack enumerated list\n plan = dicom.dcmread(ff, force=True) # reads in dicom file\n # checks if this file has an image\n c=True\n try:\n check = plan.pixel_array # throws error if dicom file has no image\n except:\n c = False\n kv = get_tuples(plan) # gets tuple for field,val pairs for this file. function defined above\n # dicom images should not have more than 300 dicom tags\n if len(kv)>500:\n logging.debug(str(len(kv)) + \" dicom tags produced by \" + ff)\n kv.append(('file', f_list_elem[1])) # adds my custom field with the original filepath\n kv.append(('has_pix_array',c)) # adds my custom field with if file has image\n if c:\n # adds my custom category field - useful if classifying images before processing\n kv.append(('category','uncategorized'))\n else:\n kv.append(('category','no image')) # adds my custom category field, makes note as imageless\n return dict(kv)\n\n\n# Function to extract pixel array information\n# takes an integer used to index into the global filedata dataframe\n# returns tuple of\n# filemapping: dicom to png paths (as str)\n# fail_path: dicom to failed folder (as tuple)\n# found_err: error code produced when processing\ndef extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit):\n ds = dicom.dcmread(filedata.iloc[i].loc['file'], force=True) # read file in\n found_err=None\n filemapping = \"\"\n fail_path = \"\"\n try:\n im = ds.pixel_array # pull image from read dicom\n imName=os.path.split(filedata.iloc[i].loc['file'])[1][:-4] # get file name ex: IM-0107-0022\n\n if flattened_to_level == 'patient':\n ID = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n folderName = hashlib.sha224(ID.encode('utf-8')).hexdigest()\n # check for existence of patient folder. 
Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n elif flattened_to_level == 'study':\n ID1 = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2 = filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n except:\n ID2='ALL-STUDIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n else:\n ID1=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2=filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n ID3=filedata.iloc[i].loc['SeriesInstanceUID'] # Unique identifier of the Series.\n except:\n ID2='ALL-STUDIES'\n ID3='ALL-SERIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID3.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. 
Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n\n\n pngfile = png_destination+folderName + '/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n dicom_path = filedata.iloc[i].loc['file']\n image_path = png_destination+folderName+'/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n if is16Bit:\n # write the PNG file as a 16-bit greyscale \n image_2d = ds.pixel_array.astype(np.double) \n # # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 65535.0 \n # # Convert to uint\n shape = ds.pixel_array.shape\n image_2d_scaled = np.uint16(image_2d_scaled) \n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True,bitdepth=16)\n w.write(png_file, image_2d_scaled)\n else: \n shape = ds.pixel_array.shape\n # Convert to float to avoid overflow or underflow losses.\n image_2d = ds.pixel_array.astype(float)\n # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 255.0\n # onvert to uint\n image_2d_scaled = np.uint8(image_2d_scaled)\n # Write the PNG file\n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True)\n w.write(png_file, image_2d_scaled)\n filemapping = filedata.iloc[i].loc['file'] + ', ' + pngfile + '\\n'\n except AttributeError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '1/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except ValueError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '2/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except BaseException as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '3/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except Exception as error:\n 
found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '4/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n return (filemapping, fail_path, found_err)\n\n\n# Function when pydicom fails to read a value attempt to read as other types.\ndef fix_mismatch_callback(raw_elem, **kwargs):\n try:\n if raw_elem.VR: \n values.convert_value(raw_elem.VR, raw_elem)\n except BaseException as err:\n for vr in kwargs['with_VRs']:\n try:\n values.convert_value(vr, raw_elem)\n except ValueError:\n pass\n except TypeError:\n continue\n else:\n raw_elem = raw_elem._replace(VR=vr)\n return raw_elem\n\n\ndef get_path(depth, dicom_home):\n directory = dicom_home + '/'\n i = 0\n while i < depth:\n directory += \"*/\"\n i += 1\n return directory + \"*.dcm\"\n\n \n# Function used by pydicom.\ndef fix_mismatch(with_VRs=['PN', 'DS', 'IS']):\n \"\"\"A callback function to check that RawDataElements are translatable\n with their provided VRs. If not, re-attempt translation using\n some other translators.\n Parameters\n ----------\n with_VRs : list, [['PN', 'DS', 'IS']]\n A list of VR strings to attempt if the raw data element value cannot\n be translated with the raw data element's VR.\n Returns\n -------\n No return value. 
The callback function will return either\n the original RawDataElement instance, or one with a fixed VR.\n \"\"\"\n dicom.config.data_element_callback = fix_mismatch_callback\n config.data_element_callback_kwargs = {\n 'with_VRs': with_VRs,\n } \n\n\ndef execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start):\n err = None\n fix_mismatch()\n if processes == 0.5: # use half the cores to avoid high ram usage\n core_count = int(os.cpu_count()/2)\n elif processes == 0: # use all the cores\n core_count = int(os.cpu_count())\n elif processes < os.cpu_count(): # use the specified number of cores to avoid high ram usage\n core_count = processes\n else:\n core_count = int(os.cpu_count())\n # get set up to create dataframe\n dirs = os.listdir(dicom_home)\n # gets all dicom files. 
if editing this code, get filelist into the format of a list of strings,\n # with each string as the file path to a different dicom file.\n file_path = get_path(depth, dicom_home)\n\n if os.path.isfile(pickle_file):\n f=open(pickle_file,'rb')\n filelist=pickle.load(f)\n else:\n filelist=glob.glob(file_path, recursive=True) # search the folders at the depth we request and finds all dicoms\n pickle.dump(filelist,open(pickle_file,'wb'))\n file_chunks = np.array_split(filelist,no_splits)\n logging.info('Number of dicom files: ' + str(len(filelist)))\n\n try:\n ff = filelist[0] # load first file as a template to look at all\n except IndexError:\n logging.error(\"There is no file present in the given folder in \" + file_path)\n sys.exit(1)\n\n plan = dicom.dcmread(ff, force=True)\n logging.debug('Loaded the first file successfully')\n\n keys = [(aa) for aa in plan.dir() if (hasattr(plan, aa) and aa != 'PixelData')]\n # checks for images in fields and prints where they are\n for field in plan.dir():\n if (hasattr(plan, field) and field!='PixelData'):\n entry = getattr(plan, field)\n if type(entry) is bytes:\n logging.debug(field)\n logging.debug(str(entry))\n\n for i,chunk in enumerate(file_chunks):\n csv_destination = \"{}/meta/metadata_{}.csv\".format(output_directory,i)\n mappings = \"{}/maps/mapping_{}.csv\".format(output_directory,i)\n fm = open(mappings, \"w+\")\n filemapping = 'Original DICOM file location, PNG location \\n'\n fm.write(filemapping)\n\n # add a check to see if the metadata has already been extracted\n # step through whole file list, read in file, append fields to future dataframe of all files\n\n headerlist = []\n # start up a multi processing pool\n # for every item in filelist send data to a subprocess and run extract_headers func\n # output is then added to headerlist as they are completed (no ordering is done)\n with Pool(core_count) as p:\n res= p.imap_unordered(extract_headers, enumerate(chunk))\n for i,e in enumerate(res):\n 
headerlist.append(e)\n data = pd.DataFrame(headerlist)\n logging.info('Chunk ' + str(i) + ' Number of fields per file : ' + str(len(data.columns)))\n # find common fields\n # make dataframe containing all fields and all files minus those removed in previous block\n # export csv file of final dataframe\n export_csv = data.to_csv(csv_destination, index = None, header=True)\n fields=data.keys()\n count = 0 # potential painpoint\n # writting of log handled by main process\n if print_images:\n logging.info(\"Start processing Images\")\n filedata = data\n total = len(chunk)\n stamp = time.time()\n for i in range(len(filedata)):\n (fmap,fail_path,err) = extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit)\n if err:\n count +=1\n copyfile(fail_path[0],fail_path[1])\n err_msg = str(count) + ' out of ' + str(len(chunk)) + ' dicom images have failed extraction'\n logging.error(err_msg)\n else:\n fm.write(fmap)\n fm.close()\n logging.info('Chunk run time: %s %s', time.time() - t_start, ' seconds!')\n\n logging.info('Generating final metadata file')\n\n col_names = dict()\n all_headers = dict()\n total_length = 0\n\n metas = glob.glob( \"{}*.csv\".format(meta_directory))\n # for each meta file identify the columns that are not na's for at least 10% (metadata_col_freq_threshold) of data\n for meta in metas:\n m = pd.read_csv(meta,dtype='str')\n d_len = m.shape[0]\n total_length += d_len\n\n for e in m.columns:\n col_pop = d_len - np.sum(m[e].isna()) # number of populated rows for this column in this metadata file\n\n if e in col_names:\n col_names[e] += col_pop\n else:\n col_names[e] = col_pop\n \n # all_headers keeps track of number of appearances of each header. 
We later use this count to ensure that\n # the headers we use are present in all metadata files.\n if e in all_headers:\n all_headers[e] += 1\n else:\n all_headers[e] = 1\n\n loadable_names = list()\n for k in col_names.keys():\n if k in all_headers and all_headers[k] >= no_splits: # no_splits == number of batches used \n if col_names[k] >= metadata_col_freq_threshold*total_length:\n loadable_names.append(k) # use header only if it's present in every metadata file\n \n # load every metadata file using only valid columns\n meta_list = list()\n for meta in metas:\n m = pd.read_csv(meta,dtype='str',usecols=loadable_names)\n meta_list.append(m)\n merged_meta = pd.concat(meta_list,ignore_index=True)\n merged_meta.to_csv('{}/metadata.csv'.format(output_directory),index=False)\n # getting a single mapping file\n logging.info('Generatign final mapping file')\n mappings = glob.glob(\"{}/maps/*.csv\".format(output_directory))\n map_list = list()\n for mapping in mappings:\n map_list.append(pd.read_csv(mapping,dtype='str'))\n merged_maps = pd.concat(map_list,ignore_index=True)\n if print_only_common_headers:\n mask_common_fields = merged_maps.isnull().mean() < 0.1\n common_fields = set(np.asarray(merged_maps.columns)[mask_common_fields])\n merged_maps = merged_maps[common_fields]\n merged_maps.to_csv('{}/mapping.csv'.format(output_directory),index=False)\n\n if send_email:\n subprocess.call('echo \"Niffler has successfully completed the png conversion\" | mail -s \"The image conversion'\n ' has been complete\" {0}'.format(email), shell=True)\n # Record the total run-time\n logging.info('Total run time: %s %s', time.time() - t_start, ' seconds!')\n logging.shutdown() # Closing logging file after extraction is done !!\n logs = []\n logs.append(err)\n logs.append(\"The PNG conversion is SUCCESSFUL\")\n return logs\n\n\nif __name__ == \"__main__\":\n with open('config.json', 'r') as f:\n niffler = json.load(f)\n\n # CLI Argument Parser\n ap = argparse.ArgumentParser()\n\n 
ap.add_argument(\"--DICOMHome\", default=niffler['DICOMHome'])\n ap.add_argument(\"--OutputDirectory\", default=niffler['OutputDirectory'])\n ap.add_argument(\"--Depth\", default=niffler['Depth'])\n ap.add_argument(\"--SplitIntoChunks\", default=niffler['SplitIntoChunks'])\n ap.add_argument(\"--PrintImages\", default=niffler['PrintImages'])\n ap.add_argument(\"--CommonHeadersOnly\", default=niffler['CommonHeadersOnly'])\n ap.add_argument(\"--UseProcesses\", default=niffler['UseProcesses'])\n ap.add_argument(\"--FlattenedToLevel\", default=niffler['FlattenedToLevel'])\n ap.add_argument(\"--is16Bit\", default=niffler['is16Bit'])\n ap.add_argument(\"--SendEmail\", default=niffler['SendEmail'])\n ap.add_argument(\"--YourEmail\", default=niffler['YourEmail'])\n\n args = vars(ap.parse_args())\n\n if len(args) > 0:\n initialize_config_and_execute(args)\n else:\n initialize_config_and_execute(niffler)\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"numpy.maximum",
"numpy.asarray",
"numpy.uint8",
"pandas.DataFrame",
"numpy.uint16",
"numpy.array_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
PuneethaPai/transformers | [
"a34a9896ac2a4a33ff9cd805c76eed914c8d8965",
"a34a9896ac2a4a33ff9cd805c76eed914c8d8965"
] | [
"examples/text-classification/run_glue.py",
"src/transformers/convert_pytorch_checkpoint_to_tf2.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\n\nimport dataclasses\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Dict, Optional\n\nimport numpy as np\n\nfrom transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset\nfrom transformers import GlueDataTrainingArguments as DataTrainingArguments\nfrom transformers import (\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n glue_compute_metrics,\n glue_output_modes,\n glue_tasks_num_labels,\n set_seed,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n 
cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n try:\n num_labels = glue_tasks_num_labels[data_args.task_name]\n output_mode = glue_output_modes[data_args.task_name]\n except KeyError:\n raise ValueError(\"Task not found: %s\" % (data_args.task_name))\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n )\n model = AutoModelForSequenceClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # Get datasets\n train_dataset = GlueDataset(data_args, tokenizer=tokenizer) if training_args.do_train else None\n eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"dev\") if training_args.do_eval else None\n test_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"test\") if training_args.do_predict else None\n\n 
def compute_metrics(p: EvalPrediction) -> Dict:\n if output_mode == \"classification\":\n preds = np.argmax(p.predictions, axis=1)\n elif output_mode == \"regression\":\n preds = np.squeeze(p.predictions)\n return glue_compute_metrics(data_args.task_name, preds, p.label_ids)\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n trainer.train(\n model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None\n )\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n eval_results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_datasets = [eval_dataset]\n if data_args.task_name == \"mnli\":\n mnli_mm_data_args = dataclasses.replace(data_args, task_name=\"mnli-mm\")\n eval_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode=\"dev\"))\n\n for eval_dataset in eval_datasets:\n eval_result = trainer.evaluate(eval_dataset=eval_dataset)\n\n output_eval_file = os.path.join(\n training_args.output_dir, f\"eval_results_{eval_dataset.args.task_name}.txt\"\n )\n if trainer.is_world_master():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(eval_dataset.args.task_name))\n for key, value in eval_result.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n eval_results.update(eval_result)\n\n if training_args.do_predict:\n logging.info(\"*** Test ***\")\n test_datasets = [test_dataset]\n if data_args.task_name == \"mnli\":\n mnli_mm_data_args = 
dataclasses.replace(data_args, task_name=\"mnli-mm\")\n test_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode=\"test\"))\n\n for test_dataset in test_datasets:\n predictions = trainer.predict(test_dataset=test_dataset).predictions\n if output_mode == \"classification\":\n predictions = np.argmax(predictions, axis=1)\n\n output_test_file = os.path.join(\n training_args.output_dir, f\"test_results_{test_dataset.args.task_name}.txt\"\n )\n if trainer.is_world_master():\n with open(output_test_file, \"w\") as writer:\n logger.info(\"***** Test results {} *****\".format(test_dataset.args.task_name))\n writer.write(\"index\\tprediction\\n\")\n for index, item in enumerate(predictions):\n if output_mode == \"regression\":\n writer.write(\"%d\\t%3.3f\\n\" % (index, item))\n else:\n item = test_dataset.get_labels()[item]\n writer.write(\"%d\\t%s\\n\" % (index, item))\n return eval_results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n",
"# coding=utf-8\n# Copyright 2018 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Convert pytorch checkpoints to TensorFlow \"\"\"\n\n\nimport argparse\nimport logging\nimport os\n\nfrom transformers import (\n ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,\n DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,\n OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n T5_PRETRAINED_CONFIG_ARCHIVE_MAP,\n TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,\n XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,\n XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,\n AlbertConfig,\n BertConfig,\n CamembertConfig,\n CTRLConfig,\n DistilBertConfig,\n ElectraConfig,\n FlaubertConfig,\n GPT2Config,\n OpenAIGPTConfig,\n RobertaConfig,\n T5Config,\n TFAlbertForPreTraining,\n TFBertForPreTraining,\n TFBertForQuestionAnswering,\n TFBertForSequenceClassification,\n TFCamembertForMaskedLM,\n TFCTRLLMHeadModel,\n TFDistilBertForMaskedLM,\n TFDistilBertForQuestionAnswering,\n TFElectraForPreTraining,\n TFFlaubertWithLMHeadModel,\n TFGPT2LMHeadModel,\n TFOpenAIGPTLMHeadModel,\n TFRobertaForMaskedLM,\n TFRobertaForSequenceClassification,\n TFT5ForConditionalGeneration,\n TFTransfoXLLMHeadModel,\n 
TFXLMRobertaForMaskedLM,\n TFXLMWithLMHeadModel,\n TFXLNetLMHeadModel,\n TransfoXLConfig,\n XLMConfig,\n XLMRobertaConfig,\n XLNetConfig,\n cached_path,\n is_torch_available,\n load_pytorch_checkpoint_in_tf2_model,\n)\n\n\nif is_torch_available():\n import torch\n import numpy as np\n from transformers import (\n BertForPreTraining,\n BertForQuestionAnswering,\n BertForSequenceClassification,\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n GPT2LMHeadModel,\n GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLNetLMHeadModel,\n XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLMWithLMHeadModel,\n XLM_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLMRobertaForMaskedLM,\n TransfoXLLMHeadModel,\n TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,\n OpenAIGPTLMHeadModel,\n OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,\n RobertaForMaskedLM,\n RobertaForSequenceClassification,\n ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n CamembertForMaskedLM,\n CamembertForSequenceClassification,\n CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n FlaubertWithLMHeadModel,\n DistilBertForMaskedLM,\n DistilBertForQuestionAnswering,\n DistilBertForSequenceClassification,\n DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n CTRLLMHeadModel,\n CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,\n AlbertForPreTraining,\n ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n T5ForConditionalGeneration,\n T5_PRETRAINED_MODEL_ARCHIVE_MAP,\n ElectraForPreTraining,\n ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,\n )\nelse:\n (\n BertForPreTraining,\n BertForQuestionAnswering,\n BertForSequenceClassification,\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n GPT2LMHeadModel,\n GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLNetLMHeadModel,\n XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLMWithLMHeadModel,\n XLM_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLMRobertaForMaskedLM,\n TransfoXLLMHeadModel,\n TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,\n OpenAIGPTLMHeadModel,\n OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,\n 
RobertaForMaskedLM,\n RobertaForSequenceClassification,\n ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n CamembertForMaskedLM,\n CamembertForSequenceClassification,\n CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n FlaubertWithLMHeadModel,\n DistilBertForMaskedLM,\n DistilBertForSequenceClassification,\n DistilBertForQuestionAnswering,\n DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n CTRLLMHeadModel,\n CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,\n AlbertForPreTraining,\n ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n T5ForConditionalGeneration,\n T5_PRETRAINED_MODEL_ARCHIVE_MAP,\n ElectraForPreTraining,\n ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,\n ) = (\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n )\n\n\nlogging.basicConfig(level=logging.INFO)\n\nMODEL_CLASSES = {\n \"bert\": (\n BertConfig,\n TFBertForPreTraining,\n BertForPreTraining,\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"bert-large-uncased-whole-word-masking-finetuned-squad\": (\n BertConfig,\n TFBertForQuestionAnswering,\n BertForQuestionAnswering,\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"bert-large-cased-whole-word-masking-finetuned-squad\": (\n BertConfig,\n TFBertForQuestionAnswering,\n BertForQuestionAnswering,\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"bert-base-cased-finetuned-mrpc\": (\n BertConfig,\n TFBertForSequenceClassification,\n BertForSequenceClassification,\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"gpt2\": (\n GPT2Config,\n TFGPT2LMHeadModel,\n GPT2LMHeadModel,\n GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,\n GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"xlnet\": (\n XLNetConfig,\n 
TFXLNetLMHeadModel,\n XLNetLMHeadModel,\n XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"xlm\": (\n XLMConfig,\n TFXLMWithLMHeadModel,\n XLMWithLMHeadModel,\n XLM_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"xlm-roberta\": (\n XLMRobertaConfig,\n TFXLMRobertaForMaskedLM,\n XLMRobertaForMaskedLM,\n XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"transfo-xl\": (\n TransfoXLConfig,\n TFTransfoXLLMHeadModel,\n TransfoXLLMHeadModel,\n TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,\n TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"openai-gpt\": (\n OpenAIGPTConfig,\n TFOpenAIGPTLMHeadModel,\n OpenAIGPTLMHeadModel,\n OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,\n OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"roberta\": (\n RobertaConfig,\n TFRobertaForMaskedLM,\n RobertaForMaskedLM,\n ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"roberta-large-mnli\": (\n RobertaConfig,\n TFRobertaForSequenceClassification,\n RobertaForSequenceClassification,\n ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"camembert\": (\n CamembertConfig,\n TFCamembertForMaskedLM,\n CamembertForMaskedLM,\n CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"flaubert\": (\n FlaubertConfig,\n TFFlaubertWithLMHeadModel,\n FlaubertWithLMHeadModel,\n FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"distilbert\": (\n DistilBertConfig,\n TFDistilBertForMaskedLM,\n DistilBertForMaskedLM,\n DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"distilbert-base-distilled-squad\": (\n DistilBertConfig,\n TFDistilBertForQuestionAnswering,\n DistilBertForQuestionAnswering,\n DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"ctrl\": (\n CTRLConfig,\n 
TFCTRLLMHeadModel,\n CTRLLMHeadModel,\n CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,\n CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"albert\": (\n AlbertConfig,\n TFAlbertForPreTraining,\n AlbertForPreTraining,\n ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"t5\": (\n T5Config,\n TFT5ForConditionalGeneration,\n T5ForConditionalGeneration,\n T5_PRETRAINED_MODEL_ARCHIVE_MAP,\n T5_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n \"electra\": (\n ElectraConfig,\n TFElectraForPreTraining,\n ElectraForPreTraining,\n ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,\n ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ),\n}\n\n\ndef convert_pt_checkpoint_to_tf(\n model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True\n):\n if model_type not in MODEL_CLASSES:\n raise ValueError(\"Unrecognized model type, should be one of {}.\".format(list(MODEL_CLASSES.keys())))\n\n config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]\n\n # Initialise TF model\n if config_file in aws_config_map:\n config_file = cached_path(aws_config_map[config_file], force_download=not use_cached_models)\n config = config_class.from_json_file(config_file)\n config.output_hidden_states = True\n config.output_attentions = True\n print(\"Building TensorFlow model from configuration: {}\".format(str(config)))\n tf_model = model_class(config)\n\n # Load weights from tf checkpoint\n if pytorch_checkpoint_path in aws_model_maps:\n pytorch_checkpoint_path = cached_path(\n aws_model_maps[pytorch_checkpoint_path], force_download=not use_cached_models\n )\n # Load PyTorch checkpoint in tf2 model:\n tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)\n\n if compare_with_pt_model:\n tfo = tf_model(tf_model.dummy_inputs, training=False) # build the network\n\n state_dict = torch.load(pytorch_checkpoint_path, map_location=\"cpu\")\n pt_model = pt_model_class.from_pretrained(\n 
pretrained_model_name_or_path=None, config=config, state_dict=state_dict\n )\n\n with torch.no_grad():\n pto = pt_model(**pt_model.dummy_inputs)\n\n np_pt = pto[0].numpy()\n np_tf = tfo[0].numpy()\n diff = np.amax(np.abs(np_pt - np_tf))\n print(\"Max absolute difference between models outputs {}\".format(diff))\n assert diff <= 2e-2, \"Error, model absolute difference is >2e-2: {}\".format(diff)\n\n # Save pytorch-model\n print(\"Save TensorFlow model to {}\".format(tf_dump_path))\n tf_model.save_weights(tf_dump_path, save_format=\"h5\")\n\n\ndef convert_all_pt_checkpoints_to_tf(\n args_model_type,\n tf_dump_path,\n model_shortcut_names_or_path=None,\n config_shortcut_names_or_path=None,\n compare_with_pt_model=False,\n use_cached_models=False,\n remove_cached_files=False,\n only_convert_finetuned_models=False,\n):\n assert os.path.isdir(args.tf_dump_path), \"--tf_dump_path should be a directory\"\n\n if args_model_type is None:\n model_types = list(MODEL_CLASSES.keys())\n else:\n model_types = [args_model_type]\n\n for j, model_type in enumerate(model_types, start=1):\n print(\"=\" * 100)\n print(\" Converting model type {}/{}: {}\".format(j, len(model_types), model_type))\n print(\"=\" * 100)\n if model_type not in MODEL_CLASSES:\n raise ValueError(\n \"Unrecognized model type {}, should be one of {}.\".format(model_type, list(MODEL_CLASSES.keys()))\n )\n\n config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]\n\n if model_shortcut_names_or_path is None:\n model_shortcut_names_or_path = list(aws_model_maps.keys())\n if config_shortcut_names_or_path is None:\n config_shortcut_names_or_path = model_shortcut_names_or_path\n\n for i, (model_shortcut_name, config_shortcut_name) in enumerate(\n zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1\n ):\n print(\"-\" * 100)\n if \"-squad\" in model_shortcut_name or \"-mrpc\" in model_shortcut_name or \"-mnli\" in model_shortcut_name:\n if not 
only_convert_finetuned_models:\n print(\" Skipping finetuned checkpoint {}\".format(model_shortcut_name))\n continue\n model_type = model_shortcut_name\n elif only_convert_finetuned_models:\n print(\" Skipping not finetuned checkpoint {}\".format(model_shortcut_name))\n continue\n print(\n \" Converting checkpoint {}/{}: {} - model_type {}\".format(\n i, len(aws_config_map), model_shortcut_name, model_type\n )\n )\n print(\"-\" * 100)\n\n if config_shortcut_name in aws_config_map:\n config_file = cached_path(aws_config_map[config_shortcut_name], force_download=not use_cached_models)\n else:\n config_file = cached_path(config_shortcut_name, force_download=not use_cached_models)\n\n if model_shortcut_name in aws_model_maps:\n model_file = cached_path(aws_model_maps[model_shortcut_name], force_download=not use_cached_models)\n else:\n model_file = cached_path(model_shortcut_name, force_download=not use_cached_models)\n\n if os.path.isfile(model_shortcut_name):\n model_shortcut_name = \"converted_model\"\n\n convert_pt_checkpoint_to_tf(\n model_type=model_type,\n pytorch_checkpoint_path=model_file,\n config_file=config_file,\n tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + \"-tf_model.h5\"),\n compare_with_pt_model=compare_with_pt_model,\n )\n if remove_cached_files:\n os.remove(config_file)\n os.remove(model_file)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\n \"--tf_dump_path\", default=None, type=str, required=True, help=\"Path to the output Tensorflow dump file.\"\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n help=\"Model type selected in the list of {}. If not given, will download and convert all the models from AWS.\".format(\n list(MODEL_CLASSES.keys())\n ),\n )\n parser.add_argument(\n \"--pytorch_checkpoint_path\",\n default=None,\n type=str,\n help=\"Path to the PyTorch checkpoint path or shortcut name to download from AWS. 
\"\n \"If not given, will download and convert all the checkpoints from AWS.\",\n )\n parser.add_argument(\n \"--config_file\",\n default=None,\n type=str,\n help=\"The config json file corresponding to the pre-trained model. \\n\"\n \"This specifies the model architecture. If not given and \"\n \"--pytorch_checkpoint_path is not given or is a shortcut name\"\n \"use the configuration associated to the shortcut name on the AWS\",\n )\n parser.add_argument(\n \"--compare_with_pt_model\", action=\"store_true\", help=\"Compare Tensorflow and PyTorch model predictions.\"\n )\n parser.add_argument(\n \"--use_cached_models\",\n action=\"store_true\",\n help=\"Use cached models if possible instead of updating to latest checkpoint versions.\",\n )\n parser.add_argument(\n \"--remove_cached_files\",\n action=\"store_true\",\n help=\"Remove pytorch models after conversion (save memory when converting in batches).\",\n )\n parser.add_argument(\"--only_convert_finetuned_models\", action=\"store_true\", help=\"Only convert finetuned models.\")\n args = parser.parse_args()\n\n # if args.pytorch_checkpoint_path is not None:\n # convert_pt_checkpoint_to_tf(args.model_type.lower(),\n # args.pytorch_checkpoint_path,\n # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,\n # args.tf_dump_path,\n # compare_with_pt_model=args.compare_with_pt_model,\n # use_cached_models=args.use_cached_models)\n # else:\n convert_all_pt_checkpoints_to_tf(\n args.model_type.lower() if args.model_type is not None else None,\n args.tf_dump_path,\n model_shortcut_names_or_path=[args.pytorch_checkpoint_path]\n if args.pytorch_checkpoint_path is not None\n else None,\n config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,\n compare_with_pt_model=args.compare_with_pt_model,\n use_cached_models=args.use_cached_models,\n remove_cached_files=args.remove_cached_files,\n only_convert_finetuned_models=args.only_convert_finetuned_models,\n )\n"
] | [
[
"numpy.squeeze",
"numpy.argmax"
],
[
"torch.no_grad",
"numpy.abs",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
voidrank/Geo-CNN | [
"4e8a7d5cc0d14ffa2a1b8bef854f294ae4e25f8b"
] | [
"train/test.py"
] | [
"''' Evaluating Frustum PointNets.\nWrite evaluation results to KITTI format labels.\nand [optionally] write results to pickle files.\n\nAuthor: Charles R. Qi\nDate: September 2017\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport argparse\nimport importlib\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\nfrom IPython import embed\nfrom model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER\nimport provider\nfrom train_util import get_batch\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\nparser.add_argument('--model', default='frustum_pointnets_v1', help='Model name [default: frustum_pointnets_v1]')\nparser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size for inference [default: 32]')\nparser.add_argument('--output', default='test_results', help='output file/folder name [default: test_results]')\nparser.add_argument('--data_path', default=None, help='frustum dataset pickle filepath [default: None]')\nparser.add_argument('--from_rgb_detection', action='store_true', help='test from dataset files from rgb detection.')\nparser.add_argument('--idx_path', default=None, help='filename of txt where each line is a data idx, used for rgb detection -- write <id>.txt for all frames. 
[default: None]')\nparser.add_argument('--dump_result', action='store_true', help='If true, also dump results to .pickle file')\nparser.add_argument('--vis', action='store_true', help='If true, save visualization')\nFLAGS = parser.parse_args()\n\n# Set training configurations\nBATCH_SIZE = FLAGS.batch_size\nMODEL_PATH = FLAGS.model_path\nGPU_INDEX = FLAGS.gpu\nNUM_POINT = FLAGS.num_point\nMODEL = importlib.import_module(FLAGS.model)\nNUM_CLASSES = 2\nNUM_CHANNEL = 4\n\n# Load Frustum Datasets.\nTEST_DATASET = provider.FrustumDataset(npoints=NUM_POINT, split='val',\n rotate_to_center=True, overwritten_data_path=FLAGS.data_path,\n from_rgb_detection=FLAGS.from_rgb_detection, one_hot=True)\n\ndef get_session_and_ops(batch_size, num_point):\n ''' Define model graph, load model parameters,\n create session and return session handle and tensors\n '''\n with tf.Graph().as_default():\n with tf.device('/gpu:'+str(GPU_INDEX)):\n pointclouds_pl, one_hot_vec_pl, labels_pl, centers_pl, \\\n heading_class_label_pl, heading_residual_label_pl, \\\n size_class_label_pl, size_residual_label_pl = \\\n MODEL.placeholder_inputs(batch_size, num_point)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n end_points = MODEL.get_model(pointclouds_pl, one_hot_vec_pl,\n is_training_pl)\n loss = MODEL.get_loss(labels_pl, centers_pl,\n heading_class_label_pl, heading_residual_label_pl,\n size_class_label_pl, size_residual_label_pl, end_points)\n saver = tf.train.Saver()\n\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n sess = tf.Session(config=config)\n\n # Restore variables from disk.\n saver.restore(sess, MODEL_PATH)\n ops = {'pointclouds_pl': pointclouds_pl,\n 'one_hot_vec_pl': one_hot_vec_pl,\n 'labels_pl': labels_pl,\n 'centers_pl': centers_pl,\n 'heading_class_label_pl': heading_class_label_pl,\n 'heading_residual_label_pl': heading_residual_label_pl,\n 'size_class_label_pl': size_class_label_pl,\n 
'size_residual_label_pl': size_residual_label_pl,\n 'is_training_pl': is_training_pl,\n 'logits': end_points['mask_logits'],\n 'center': end_points['center'],\n 'end_points': end_points,\n 'loss': loss,\n 'vis': end_points['vis']}\n return sess, ops\n\ndef softmax(x):\n ''' Numpy function for softmax'''\n shape = x.shape\n probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))\n probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)\n return probs\n\ndef inference(sess, ops, pc, one_hot_vec, batch_size):\n ''' Run inference for frustum pointnets in batch mode '''\n assert pc.shape[0]%batch_size == 0\n num_batches = pc.shape[0]//batch_size\n logits = np.zeros((pc.shape[0], pc.shape[1], NUM_CLASSES))\n centers = np.zeros((pc.shape[0], 3))\n heading_logits = np.zeros((pc.shape[0], NUM_HEADING_BIN))\n heading_residuals = np.zeros((pc.shape[0], NUM_HEADING_BIN))\n size_logits = np.zeros((pc.shape[0], NUM_SIZE_CLUSTER))\n size_residuals = np.zeros((pc.shape[0], NUM_SIZE_CLUSTER, 3))\n scores = np.zeros((pc.shape[0],)) # 3D box score`\n vis = np.zeros((pc.shape[0], pc.shape[1]))\n\n ep = ops['end_points']\n for i in range(num_batches):\n feed_dict = {\\\n ops['pointclouds_pl']: pc[i*batch_size:(i+1)*batch_size,...],\n ops['one_hot_vec_pl']: one_hot_vec[i*batch_size:(i+1)*batch_size,:],\n ops['is_training_pl']: False}\n\n batch_logits, batch_centers, \\\n batch_heading_scores, batch_heading_residuals, \\\n batch_size_scores, batch_size_residuals, batch_vis = \\\n sess.run([ops['logits'], ops['center'],\n ep['heading_scores'], ep['heading_residuals'],\n ep['size_scores'], ep['size_residuals'], ops['vis']],\n feed_dict=feed_dict)\n\n logits[i*batch_size:(i+1)*batch_size,...] = batch_logits\n centers[i*batch_size:(i+1)*batch_size,...] = batch_centers\n heading_logits[i*batch_size:(i+1)*batch_size,...] = batch_heading_scores\n heading_residuals[i*batch_size:(i+1)*batch_size,...] = batch_heading_residuals\n size_logits[i*batch_size:(i+1)*batch_size,...] 
= batch_size_scores\n size_residuals[i*batch_size:(i+1)*batch_size,...] = batch_size_residuals\n if FLAGS.vis:\n vis[i*batch_size:(i+1)*batch_size,...] = batch_vis[:,:,0].mean(axis=2)\n\n # Compute scores\n batch_seg_prob = softmax(batch_logits)[:,:,1] # BxN\n batch_seg_mask = np.argmax(batch_logits, 2) # BxN\n mask_mean_prob = np.sum(batch_seg_prob * batch_seg_mask, 1) # B,\n mask_mean_prob = mask_mean_prob / np.sum(batch_seg_mask,1) # B,\n heading_prob = np.max(softmax(batch_heading_scores),1) # B\n size_prob = np.max(softmax(batch_size_scores),1) # B,\n batch_scores = np.log(mask_mean_prob) + np.log(heading_prob) + np.log(size_prob)\n scores[i*batch_size:(i+1)*batch_size] = batch_scores \n # Finished computing scores\n\n heading_cls = np.argmax(heading_logits, 1) # B\n size_cls = np.argmax(size_logits, 1) # B\n heading_res = np.array([heading_residuals[i,heading_cls[i]] \\\n for i in range(pc.shape[0])])\n size_res = np.vstack([size_residuals[i,size_cls[i],:] \\\n for i in range(pc.shape[0])])\n\n return np.argmax(logits, 2), centers, heading_cls, heading_res, \\\n size_cls, size_res, scores, vis\n\ndef write_detection_results(result_dir, id_list, type_list, box2d_list, center_list, \\\n heading_cls_list, heading_res_list, \\\n size_cls_list, size_res_list, \\\n rot_angle_list, score_list):\n ''' Write frustum pointnets results to KITTI format label files. 
'''\n if result_dir is None: return\n results = {} # map from idx to list of strings, each string is a line (without \\n)\n for i in range(len(center_list)):\n idx = id_list[i]\n output_str = type_list[i] + \" -1 -1 -10 \"\n box2d = box2d_list[i]\n output_str += \"%f %f %f %f \" % (box2d[0],box2d[1],box2d[2],box2d[3])\n h,w,l,tx,ty,tz,ry = provider.from_prediction_to_label_format(center_list[i],\n heading_cls_list[i], heading_res_list[i],\n size_cls_list[i], size_res_list[i], rot_angle_list[i])\n score = score_list[i]\n output_str += \"%f %f %f %f %f %f %f %f\" % (h,w,l,tx,ty,tz,ry,score)\n if idx not in results: results[idx] = []\n results[idx].append(output_str)\n\n # Write TXT files\n if not os.path.exists(result_dir): os.mkdir(result_dir)\n output_dir = os.path.join(result_dir, 'data')\n if not os.path.exists(output_dir): os.mkdir(output_dir)\n for idx in results:\n pred_filename = os.path.join(output_dir, '%06d.txt'%(idx))\n fout = open(pred_filename, 'w')\n for line in results[idx]:\n fout.write(line+'\\n')\n fout.close() \n\ndef fill_files(output_dir, to_fill_filename_list):\n ''' Create empty files if not exist for the filelist. 
'''\n for filename in to_fill_filename_list:\n filepath = os.path.join(output_dir, filename)\n if not os.path.exists(filepath):\n fout = open(filepath, 'w')\n fout.close()\n\ndef test_from_rgb_detection(output_filename, result_dir=None):\n ''' Test frustum pointents with 2D boxes from a RGB detector.\n Write test results to KITTI format label files.\n todo (rqi): support variable number of points.\n '''\n ps_list = []\n segp_list = []\n center_list = []\n heading_cls_list = []\n heading_res_list = []\n size_cls_list = []\n size_res_list = []\n rot_angle_list = []\n score_list = []\n onehot_list = []\n vis_list = []\n\n test_idxs = np.arange(0, len(TEST_DATASET))\n print(len(TEST_DATASET))\n batch_size = BATCH_SIZE\n num_batches = int((len(TEST_DATASET)+batch_size-1)/batch_size)\n\n batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))\n batch_one_hot_to_feed = np.zeros((batch_size, 3))\n sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)\n for batch_idx in range(num_batches):\n print('batch idx: %d' % (batch_idx))\n start_idx = batch_idx * batch_size\n end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)\n cur_batch_size = end_idx - start_idx\n\n batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec = \\\n get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,\n NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)\n batch_data_to_feed[0:cur_batch_size,...] 
= batch_data\n batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec\n\n # Run one batch inference\n batch_output, batch_center_pred,\\\n batch_hclass_pred, batch_hres_pred, \\\n batch_sclass_pred, batch_sres_pred, batch_scores, batch_vis = \\\n inference(sess, ops, batch_data_to_feed,\n batch_one_hot_to_feed, batch_size=batch_size)\n\n for i in range(cur_batch_size):\n ps_list.append(batch_data[i,...])\n segp_list.append(batch_output[i,...])\n center_list.append(batch_center_pred[i,:])\n heading_cls_list.append(batch_hclass_pred[i])\n heading_res_list.append(batch_hres_pred[i])\n size_cls_list.append(batch_sclass_pred[i])\n size_res_list.append(batch_sres_pred[i,:])\n rot_angle_list.append(batch_rot_angle[i])\n #score_list.append(batch_scores[i])\n score_list.append(batch_rgb_prob[i]) # 2D RGB detection score\n onehot_list.append(batch_one_hot_vec[i])\n vis_list.append(batch_vis[i,:])\n\n if FLAGS.dump_result:\n with open(output_filename, 'wp') as fp:\n pickle.dump(ps_list, fp)\n pickle.dump(segp_list, fp)\n pickle.dump(center_list, fp)\n pickle.dump(heading_cls_list, fp)\n pickle.dump(heading_res_list, fp)\n pickle.dump(size_cls_list, fp)\n pickle.dump(size_res_list, fp)\n pickle.dump(rot_angle_list, fp)\n pickle.dump(score_list, fp)\n pickle.dump(onehot_list, fp)\n\n if FLAGS.vis:\n with open(output_filename, 'wb') as fp:\n pickle.dump(ps_list, fp)\n pickle.dump(vis_list, fp)\n\n # Write detection results for KITTI evaluation\n print('Number of point clouds: %d' % (len(ps_list)))\n write_detection_results(result_dir, TEST_DATASET.id_list,\n TEST_DATASET.type_list, TEST_DATASET.box2d_list,\n center_list, heading_cls_list, heading_res_list,\n size_cls_list, size_res_list, rot_angle_list, score_list)\n # Make sure for each frame (no matter if we have measurment for that frame),\n # there is a TXT file\n output_dir = os.path.join(result_dir, 'data')\n if FLAGS.idx_path is not None:\n to_fill_filename_list = [line.rstrip()+'.txt' \\\n for line in 
open(FLAGS.idx_path)]\n fill_files(output_dir, to_fill_filename_list)\n\ndef test(output_filename, result_dir=None):\n ''' Test frustum pointnets with GT 2D boxes.\n Write test results to KITTI format label files.\n todo (rqi): support variable number of points.\n '''\n ps_list = []\n seg_list = []\n segp_list = []\n center_list = []\n heading_cls_list = []\n heading_res_list = []\n size_cls_list = []\n size_res_list = []\n rot_angle_list = []\n score_list = []\n\n test_idxs = np.arange(0, len(TEST_DATASET))\n batch_size = BATCH_SIZE\n num_batches = len(TEST_DATASET)//batch_size\n\n sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)\n\n correct_cnt = 0\n for batch_idx in range(num_batches):\n print('batch idx: %d' % (batch_idx))\n start_idx = batch_idx * batch_size\n end_idx = (batch_idx+1) * batch_size\n\n batch_data, batch_label, batch_center, \\\n batch_hclass, batch_hres, batch_sclass, batch_sres, \\\n batch_rot_angle, batch_one_hot_vec = \\\n get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,\n NUM_POINT, NUM_CHANNEL)\n\n batch_output, batch_center_pred, \\\n batch_hclass_pred, batch_hres_pred, \\\n batch_sclass_pred, batch_sres_pred, batch_scores = \\\n inference(sess, ops, batch_data,\n batch_one_hot_vec, batch_size=batch_size)\n\n correct_cnt += np.sum(batch_output==batch_label)\n\t\n for i in range(batch_output.shape[0]):\n ps_list.append(batch_data[i,...])\n seg_list.append(batch_label[i,...])\n segp_list.append(batch_output[i,...])\n center_list.append(batch_center_pred[i,:])\n heading_cls_list.append(batch_hclass_pred[i])\n heading_res_list.append(batch_hres_pred[i])\n size_cls_list.append(batch_sclass_pred[i])\n size_res_list.append(batch_sres_pred[i,:])\n rot_angle_list.append(batch_rot_angle[i])\n score_list.append(batch_scores[i])\n\n print(\"Segmentation accuracy: %f\" % \\\n (correct_cnt / float(batch_size*num_batches*NUM_POINT)))\n\n if FLAGS.dump_result:\n with open(output_filename, 'wp') as fp:\n 
pickle.dump(ps_list, fp)\n pickle.dump(seg_list, fp)\n pickle.dump(segp_list, fp)\n pickle.dump(center_list, fp)\n pickle.dump(heading_cls_list, fp)\n pickle.dump(heading_res_list, fp)\n pickle.dump(size_cls_list, fp)\n pickle.dump(size_res_list, fp)\n pickle.dump(rot_angle_list, fp)\n pickle.dump(score_list, fp)\n\n # Write detection results for KITTI evaluation\n write_detection_results(result_dir, TEST_DATASET.id_list,\n TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,\n heading_cls_list, heading_res_list,\n size_cls_list, size_res_list, rot_angle_list, score_list)\n\n\nif __name__=='__main__':\n if FLAGS.from_rgb_detection:\n test_from_rgb_detection(FLAGS.output+'.pickle', FLAGS.output)\n else:\n test(FLAGS.output+'.pickle', FLAGS.output)\n"
] | [
[
"numpy.log",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
rubind/wfc3_psf | [
"68e0d4b88e4a614939ae0c8771e37f315574ab82"
] | [
"wfc3_psf.py"
] | [
"import numpy as np\nfrom astropy.io import fits\nfrom scipy.interpolate import RectBivariateSpline\n\ndef index_PSF(PSF, x, y):\n return PSF[x+y*3]\n\ndef get_native_PSF(filt, x, y, the_path):\n x = float(np.clip(x, 0, 1014))\n y = float(np.clip(y, 0, 1014))\n\n f = fits.open(\"%sPSFSTD_WFC3IR_%s.fits\" % (the_path, filt))\n PSF = f[0].data\n f.close()\n\n if x < 507:\n sx = x/507.\n minx = 0\n else:\n sx = (x - 507.)/507.\n minx = 1\n \n if y < 507:\n sy = y/507.\n miny = 0\n else:\n sy = (y - 507.)/507.\n miny = 1\n\n out_PSF = 0.\n for dx in [0, 1]:\n for dy in [0, 1]:\n this_x = minx + dx\n this_y = miny + dy\n this_w = (sx*(dx == 1) + (1 - sx)*(dx == 0))*(sy*(dy == 1) + (1 - sy)*(dy == 0))\n print (\"x\", x, \"y\", y, \"this_x\", this_x, \"this_y\", this_y, \"this_w\", this_w)\n out_PSF += index_PSF(PSF, x = this_x, y = this_y)*this_w\n return out_PSF\n\ndef get_sampled_PSF(filt, x, y, subsample, the_path = \"./\"):\n native_PSF = get_native_PSF(filt, x, y, the_path)\n orig_sub = np.arange(len(native_PSF), dtype=np.float64)*0.25\n orig_sub -= np.median(orig_sub)\n\n ifn = RectBivariateSpline(orig_sub, orig_sub, native_PSF, kx = 3, ky = 3, s=0)\n new_sub = np.arange(len(native_PSF)*subsample/4., dtype=np.float64)/subsample\n new_sub -= np.median(new_sub)\n\n return ifn(new_sub, new_sub)\n\n"
] | [
[
"numpy.median",
"scipy.interpolate.RectBivariateSpline",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
CUrW-SL/DSS-Framework | [
"43a39b322ffb0eb92dd116e77cf9a8479357a121"
] | [
"accuracy_unit/wrf/wrf_accuracy.py"
] | [
"import math\nfrom datetime import datetime, timedelta\nimport sys\nfrom airflow.models import Variable\nimport pandas as pd\nimport numpy as np\n\nsys.path.insert(0, '/home/curw/git/DSS-Framework/db_util')\n# sys.path.insert(0, '/home/hasitha/PycharmProjects/DSS-Framework/db_util')\nfrom gen_db import CurwFcstAdapter, CurwObsAdapter, CurwSimAdapter\nfrom dss_db import RuleEngineAdapter\n\nCOMMON_DATE_TIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nSTATION_TYPE = 'CUrW_WeatherStation'\nMME_TAG = 'MDPA'\nVARIABLE_TYPE = 'rainfall'\nVARIABLE = 1\nUNIT = 1\nOBS_VARIABLE = 10\nOBS_UNIT = 9\nGFS_DAYS = 3\n\n\ndef get_curw_dss_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('db_config', deserialize_json=True)\n adapter = RuleEngineAdapter.get_instance(db_config)\n return adapter\n\n\ndef get_curw_fcst_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('fcst_db_config', deserialize_json=True)\n adapter = CurwFcstAdapter.get_instance(db_config)\n return adapter\n\n\ndef get_curw_obs_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('obs_db_config', deserialize_json=True)\n adapter = CurwObsAdapter.get_instance(db_config)\n return adapter\n\n\ndef get_curw_sim_adapter(db_config=None):\n if db_config is None:\n db_config = Variable.get('sim_db_config', deserialize_json=True)\n adapter = CurwSimAdapter.get_instance(db_config)\n return adapter\n\n\ndef calculate_wrf_rule_accuracy(wrf_rule, exec_datetime):\n print('calculate_wrf_rule_accuracy|wrf_rule : ', wrf_rule)\n print('calculate_wrf_rule_accuracy|execution_date : ', exec_datetime)\n wrf_model = 'WRF_{}'.format(wrf_rule['model'])\n print('calculate_wrf_rule_accuracy|wrf_model : ', wrf_model)\n wrf_version = wrf_rule['version']\n wrf_run = wrf_rule['rule_info']['run']\n wrf_rule_id = wrf_rule['rule_info']['id']\n gfs_hour = wrf_rule['rule_info']['hour']\n accuracy_rule_id = wrf_rule['rule_info']['accuracy_rule']\n sim_tag = 'gfs_d{}_{}'.format(wrf_run, 
gfs_hour)\n print('calculate_wrf_rule_accuracy|sim_tag : ', sim_tag)\n dss_adapter = get_curw_dss_adapter()\n accuracy_rule = dss_adapter.get_accuracy_rule_info_by_id(accuracy_rule_id)\n print('calculate_wrf_rule_accuracy|accuracy_rule : ', accuracy_rule)\n obs_station_list = format_obs_station_list(accuracy_rule['observed_stations'], accuracy_rule['allowed_error'])\n success_count = 0\n if len(obs_station_list) > 0:\n for [obs_station, allowed_error] in obs_station_list:\n station_error = calculate_station_accuracy(obs_station, wrf_model, wrf_version, wrf_run, gfs_hour,\n exec_datetime, sim_tag)\n if station_error is not None:\n if station_error <= allowed_error:\n success_count + 1\n total_stations = len(obs_station_list)\n print('calculate_wrf_rule_accuracy|total_stations : ', total_stations)\n print('calculate_wrf_rule_accuracy|success_count : ', success_count)\n accuracy_percentage = (success_count / total_stations) * 100\n print('calculate_wrf_rule_accuracy|accuracy_percentage : ', total_stations)\n dss_adapter.update_wrf_rule_accuracy_level(accuracy_percentage, wrf_rule_id)\n print('wrf rule current accuracy successfully updated.')\n accuracy_rule = dss_adapter.get_accuracy_rule_info_by_id(accuracy_rule_id)\n expected_accuracy = float(accuracy_rule['rule_accuracy'])\n if accuracy_percentage >= expected_accuracy:\n return True\n else:\n return False\n\n\ndef calculate_station_accuracy(obs_station, wrf_model, wrf_version, wrf_run, gfs_hour,\n exec_datetime, sim_tag, method='MAD'):\n obs_adapter = get_curw_obs_adapter()\n obs_station_id = get_obs_station_id(obs_station, obs_adapter)\n [tms_start, tms_end] = get_wrf_ts_start_end(exec_datetime, wrf_run, gfs_hour)\n tms_start = tms_start.strftime('%Y-%m-%d %H:%M:%S')\n tms_end = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if obs_station_id is not None:\n obs_hash_id = get_obs_station_hash_id(obs_station_id, obs_adapter)\n obs_df = get_obs_tms(obs_hash_id, exec_datetime, tms_start, tms_end, obs_adapter)\n if 
obs_df is not None:\n sim_adapter = get_curw_sim_adapter()\n wrf_station_id = get_matching_wrf_station(obs_station, obs_station_id, sim_adapter)\n print('calculate_station_accuracy|wrf_station_id : ', wrf_station_id)\n if wrf_station_id is not None:\n fcst_adapter = get_curw_fcst_adapter()\n wrf_hash_id = get_wrf_station_hash_id(wrf_model, wrf_version, wrf_station_id, exec_datetime, sim_tag,\n fcst_adapter)\n print('calculate_station_accuracy|wrf_hash_id : ', wrf_hash_id)\n if wrf_hash_id is not None:\n fcst_df = get_fcst_tms(wrf_hash_id, exec_datetime, tms_start, tms_end, fcst_adapter)\n if fcst_df is not None:\n print('calculate_station_accuracy|obs_df : ', obs_df)\n print('calculate_station_accuracy|fcst_df : ', fcst_df)\n merged_df = obs_df.merge(fcst_df, how='left', on='time')\n merged_df['cumulative_observed'] = merged_df['observed'].cumsum()\n merged_df['cumulative_forecast'] = merged_df['forecast'].cumsum()\n print(merged_df)\n merged_df['cum_diff'] = merged_df[\"cumulative_observed\"] - merged_df[\"cumulative_forecast\"]\n row_count = len(merged_df.index)\n print('row_count : ', row_count)\n if method == 'MAD':\n print('MAD')\n merged_df['abs_cum_diff'] = merged_df['cum_diff'].abs()\n sum_abs_diff = merged_df['abs_diff'].sum()\n print('sum_abs_diff : ', sum_abs_diff)\n mean_absolute_deviation = sum_abs_diff / row_count\n print('mean_absolute_deviation : ', mean_absolute_deviation)\n return mean_absolute_deviation\n elif method == 'RMSE':\n print('RMSE')\n merged_df['diff_square'] = np.power((merged_df['cum_diff']), 2)\n root_mean_square_error = math.sqrt(merged_df['diff_square'].sum() / row_count)\n print('root_mean_square_error : ', root_mean_square_error)\n return root_mean_square_error\n else:\n print('Invalid method.')\n return None\n\n\ndef format_obs_station_list(obs_stations, allowed_error):\n station_list = obs_stations.split(\",\")\n print(station_list)\n formatted_list = []\n for station in station_list:\n station_val = station.split('-')\n if 
len(station_val) == 2:\n formatted_list.append([station_val[0], station_val[1]])\n else:\n formatted_list.append([station_val[0], allowed_error])\n print(formatted_list)\n return formatted_list\n\n\ndef get_obs_station_id(obs_station, obs_adapter=None):\n if obs_adapter is None:\n obs_adapter = get_curw_obs_adapter()\n station_id = obs_adapter.get_station_id_by_name(STATION_TYPE, obs_station)\n if station_id is not None:\n print('get_obs_station_id|station_id : ', station_id)\n return station_id\n\n\ndef get_obs_station_hash_id(obs_station_id, obs_adapter=None):\n if obs_adapter is None:\n obs_adapter = get_curw_obs_adapter()\n hash_id = obs_adapter.get_station_hash_id(obs_station_id, OBS_VARIABLE, OBS_UNIT)\n if hash_id is not None:\n print('get_obs_station_hash_id|hash_id : ', hash_id)\n return hash_id\n\n\ndef get_matching_wrf_station(obs_station, obs_station_id, sim_adapter=None):\n if obs_station_id is not None:\n grid_id = '{}_{}_{}_{}'.format(VARIABLE_TYPE, obs_station_id, obs_station, MME_TAG)\n print('get_matching_wrf_station|grid_id : ', grid_id)\n if sim_adapter is None:\n sim_adapter = get_curw_sim_adapter()\n wrf_station_id = sim_adapter.get_matching_wrf_station_by_grid_id(grid_id)\n if wrf_station_id is not None:\n print('get_matching_wrf_station|wrf_station_id : ', wrf_station_id)\n return wrf_station_id\n return None\n\n\ndef get_wrf_station_hash_id(wrf_model, wrf_version, wrf_station_id, exec_date, sim_tag, fcst_adapter=None):\n if fcst_adapter is None:\n fcst_adapter = get_curw_fcst_adapter()\n source_id = fcst_adapter.get_source_id(wrf_model, wrf_version)\n if source_id is not None:\n print('get_wrf_station_hash_id|source_id : ', source_id)\n hash_id = fcst_adapter.get_hash_id_of_station(VARIABLE, UNIT, source_id, wrf_station_id, sim_tag, exec_date)\n if hash_id is not None:\n print('get_wrf_station_hash_id|hash_id : ', hash_id)\n return hash_id\n\n\ndef get_wrf_ts_start_end(exec_datetime, wrf_run, gfs_hour):\n wrf_run = int(wrf_run)\n 
exec_datetime = datetime.strptime(exec_datetime, '%Y-%m-%d %H:%M:%S')\n print(exec_datetime)\n exec_date_str = exec_datetime.strftime('%Y-%m-%d')\n exec_date = datetime.strptime(exec_date_str, '%Y-%m-%d')\n print(exec_date)\n ts_start_date = exec_date - timedelta(days=wrf_run)\n ts_start_date_str = ts_start_date.strftime('%Y-%m-%d')\n print(ts_start_date_str)\n gfs_ts_start_utc_str = '{} {}:00:00'.format(ts_start_date_str, gfs_hour)\n print(gfs_ts_start_utc_str)\n gfs_ts_start_utc = datetime.strptime(gfs_ts_start_utc_str, '%Y-%m-%d %H:%M:%S')\n gfs_ts_start_local = gfs_ts_start_utc + timedelta(hours=5, minutes=30)\n gfs_ts_end_local = gfs_ts_start_local + timedelta(days=GFS_DAYS)\n return [gfs_ts_start_local, gfs_ts_end_local]\n\n\ndef get_fcst_tms(wrf_station_hash_id, exec_datetime, tms_start, tms_end, fcst_adapter=None):\n if fcst_adapter is None:\n fcst_adapter = get_curw_fcst_adapter()\n tms_df = fcst_adapter.get_station_tms(wrf_station_hash_id, exec_datetime, tms_start, tms_end)\n if tms_df is not None:\n return format_df_to_time_indexing(tms_df)\n\n\ndef format_df_to_time_indexing(tms_df):\n tms_df['time'] = pd.to_datetime(tms_df['time'], format=COMMON_DATE_TIME_FORMAT)\n print('format_df_to_time_indexing|tms_df : ', tms_df)\n tms_df.set_index('time', inplace=True)\n return tms_df\n\n\ndef get_obs_tms(obs_station_hash_id, exec_datetime, tms_start, tms_end, obs_adapter=None):\n if obs_adapter is None:\n obs_adapter = get_curw_obs_adapter()\n tms_df = obs_adapter.get_timeseries_by_id(obs_station_hash_id, tms_start, tms_end)\n if tms_df is not None:\n return format_df_to_15min_intervals(tms_df)\n\n\ndef format_df_to_15min_intervals(tms_df):\n tms_df = format_df_to_time_indexing(tms_df)\n min15_ts = pd.DataFrame()\n min15_ts['value'] = tms_df['value'].resample('15min', label='right', closed='right').sum()\n print(min15_ts)\n return min15_ts\n\n\nif __name__ == \"__main__\":\n # obs_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': 
'35.227.163.211',\n # 'mysql_db': 'curw_obs', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}\n # print(len(obs_db_config.keys()))\n # sim_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',\n # 'mysql_db': 'curw_sim', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}\n # fcst_db_config = {'mysql_user': 'admin', 'mysql_password': 'floody', 'mysql_host': '35.227.163.211',\n # 'mysql_db': 'curw_fcst', 'log_path': '/home/hasitha/PycharmProjects/DSS-Framework/log'}\n # obs_adapter = get_curw_obs_adapter(obs_db_config)\n # sim_adapter = get_curw_sim_adapter(sim_db_config)\n # fcst_adapter = get_curw_fcst_adapter(fcst_db_config)\n # print(get_matching_wrf_station('Arangala', obs_adapter, sim_adapter))\n print(get_wrf_ts_start_end('2019-12-07 07:21:32', '2', '12'))\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
zzzace2000/cairl_nodegam | [
"90d0d56a0e7be3d1cbba6179cbfc36d626456770",
"90d0d56a0e7be3d1cbba6179cbfc36d626456770",
"90d0d56a0e7be3d1cbba6179cbfc36d626456770"
] | [
"lib/mma.py",
"lib/lightning/airl/sepsis.py",
"mimic_preproc/extract-scripts/get_cohort_baseline_info.py"
] | [
"import logging\n\nimport cvxpy as cvx\nimport numpy as np\nfrom numpy.linalg import norm\nfrom tqdm import tqdm\n\n\nclass MaxMarginAbbeel(object):\n \"\"\"\n implementation of (Abbeel & Ng 2004)\n\n two versions: available\n\n 1. max-margin (stable, computationally more heavy)\n 2. projection (simpler)\n\n \"\"\"\n\n def __init__(self,\n pi_init,\n p,\n mu_expert,\n irl_precision,\n mdp_solver,\n mu_estimator,\n evaluators,\n method=\"max_margin\",\n slack_scale=0.01,\n use_slack=False,\n stochastic=True,\n delta=0.2\n ):\n \"\"\"TODO: to be defined1.\n\n Parameters\n ----------\n p : int\n dimension of phi\n mu_expert : target for feature expectation IRL\n mu_estimator : function\n estimate E[mu(s_0) | pi, D]\n evaluator : function\n evaluate i.t.o perf score and action matching\n irl_precision : convergence threshold\n use_slack : whether to use slack for convex optimization\n slack_scale : scaling term\n method: max_margin or projection\n \"\"\"\n self._pi_init = pi_init\n self._p = p\n self._mu_expert = mu_expert\n self._mu_estimator = mu_estimator\n self._irl_precision = irl_precision\n self._method = method\n self._evaluators = evaluators\n self._mdp_solver = mdp_solver\n self._use_slack = use_slack\n self._slack_scale = slack_scale\n self._stochastic = stochastic\n self._delta = delta\n\n def run(self, n_iteration):\n \"\"\"TODO: Docstring for something.\n\n Parameters\n ----------\n n_iteration : max iteration count\n\n Returns\n -------\n exp results\n \"\"\"\n mu_estimator = self._mu_estimator\n stochastic = self._stochastic\n\n pi_list = []\n pi_best_list = []\n mu_list = []\n mu_bar_list = []\n weight_list = []\n weight_best_list = []\n margin_v_list = []\n margin_mu_list = []\n\n pi_list.append(self._pi_init)\n\n mu_estimator.fit(self._pi_init, stochastic)\n mu_irl = mu_estimator.estimate()\n\n mu_list.append(mu_irl)\n mu_bar_list.append(mu_irl)\n\n weight_list.append(-1.0)\n margin_v_list.append(-1.0)\n margin_mu_list.append(-1.0)\n\n eval_metrics = 
{}\n\n # Evaluate the inital policy\n for e in self._evaluators:\n the_metrics = e.evaluate(self._pi_init)\n for k, v in the_metrics.items():\n if k not in eval_metrics:\n eval_metrics[k] = []\n eval_metrics['best_' + k] = []\n eval_metrics[k].append(v)\n\n for epi_i in tqdm(range(n_iteration)):\n if self._method == \"max_margin\":\n W, (margin_v, margin_mu, converged) = self._optimize(mu_list)\n elif self._method == \"projection\":\n W, (margin_v, margin_mu, converged, mu_bar_im1) = \\\n self._optimize_projection(mu_list, mu_bar_list)\n mu_bar_list.append(mu_bar_im1)\n else:\n raise Exception(\"Unknown IRL solver\")\n\n weight_list.append(W)\n margin_v_list.append(margin_v)\n margin_mu_list.append(margin_mu)\n logging.info(\"margin_v: {}\".format(margin_v))\n logging.info(\"margin_mu: {}\".format(margin_mu))\n margin_hyperplane = 2 / norm(W, 2)\n logging.info(\"margin_hyperplane: {}\".format(margin_hyperplane))\n\n if converged:\n logging.info(\"margin_mu converged after {} iterations\".format(epi_i + 1))\n break\n\n pi_irl = self._mdp_solver.solve(reward_fn=lambda obs_next: obs_next.dot(W))\n pi_list.append(pi_irl)\n\n mu_estimator.fit(pi_irl, stochastic)\n mu_irl = mu_estimator.estimate()\n\n mu_list.append(mu_irl)\n logging.info(\"mu_irl: {}\".format(mu_irl))\n\n mu_list_ = np.array([mu.flatten() for mu in mu_list])\n mixture_weight_list = self._choose_mixture_weight(mu_list_, self._mu_expert)\n logging.info(\"mixture_weight_list: {}\".format(mixture_weight_list))\n\n # pi_best = MixturePolicy(mixture_weight_list, pi_list)\n pi_best = 0\n for w, p in zip(mixture_weight_list, pi_list):\n pi_best += w * p\n pi_best_list.append(pi_best)\n\n best_mu = mixture_weight_list.T.dot(mu_list_)\n w_best = self._mu_expert - best_mu\n w_best /= norm(w_best, 2)\n weight_best_list.append(w_best)\n\n # Do the evaluations\n for e in self._evaluators:\n the_metrics = e.evaluate(pi_best)\n for k, v in the_metrics.items():\n eval_metrics['best_' + k].append(v)\n the_metrics = 
e.evaluate(pi_irl)\n for k, v in the_metrics.items():\n eval_metrics[k].append(v)\n logging.info(\"eval_metrics: {}\".format(eval_metrics))\n\n results = {\n \"margin_v\": margin_v_list,\n \"margin_mu\": margin_mu_list,\n \"mu\": mu_list,\n \"weight\": weight_list,\n \"policy\": pi_list,\n \"policy_best\": pi_best_list,\n \"weight_best\": weight_best_list,\n }\n return results, eval_metrics\n\n def _choose_mixture_weight(self, mu_list, mu_exp):\n \"\"\"\n implement the choice of policy in\n Section 3.0 in Abbeel, Ng (2004)\n\n Parameters\n ----------\n mu_list : TODO\n\n Returns\n -------\n pi_best\n\n \"\"\"\n lamda = cvx.Variable(len(mu_list))\n\n obj = cvx.Minimize(cvx.norm(mu_exp - mu_list.T @ lamda, p=2))\n constraints = [lamda >= 0, sum(lamda) == 1]\n\n prob = cvx.Problem(obj, constraints)\n prob.solve()\n\n if prob.status in [\"unbounded\", \"infeasible\"]:\n logging.warning(\"the optimization failed: {}\".format(prob.status))\n\n weight_list = np.array(lamda.value).flatten()\n tol = 1e-6\n weight_list[np.abs(weight_list) < tol] = 0.0\n weight_list /= np.sum(weight_list)\n return weight_list\n\n def _optimize(self, mu_list):\n \"\"\"linearly parametrize reward function.\n\n implements Eq. 
11 from Abbeel\n\n Parameters\n ----------\n W : weight\n\n Returns\n -------\n TODO\n - think whether to do s, a or just s\n\n \"\"\"\n logging.info(\"solving for W given mu_list\")\n # define variables\n W = cvx.Variable(self._p)\n t = cvx.Variable(1)\n\n if self._use_slack:\n xi = cvx.Variable(1)\n\n mu_exp = cvx.Parameter(self._p)\n mu_exp.value = self._mu_expert.flatten()\n\n if self._use_slack:\n C = cvx.Parameter(1)\n C.value = self._slack_scale\n obj = cvx.Maximize(t - C * xi)\n else:\n obj = cvx.Maximize(t)\n\n constraints = []\n\n for mu in mu_list:\n mu = mu.flatten()\n if self._use_slack:\n constraints += [W.T @ mu_exp + xi >= W.T @ mu + t]\n else:\n constraints += [W.T @ mu_exp >= W.T @ mu + t]\n constraints += [cvx.norm(W, 2) <= 1]\n\n prob = cvx.Problem(obj, constraints)\n prob.solve()\n\n if prob.status in [\"unbounded\", \"infeasible\"]:\n logging.warning(\"the optimization failed: {}\".format(prob.status))\n\n W = np.array(W.value)\n margin_v = t.value\n\n mu_list = np.array([mu.flatten() for mu in mu_list])\n margin_mu_list = norm(np.array(mu_exp.value).T - mu_list, 2, axis=1)\n margin_mu = np.min(margin_mu_list)\n\n converged = margin_mu <= self._irl_precision\n return W, (margin_v, margin_mu, converged)\n\n def _optimize_projection(self, mu_list, mu_bar_list):\n \"\"\"linearly parametrize reward function.\n\n implements Sec. 
3.1 from Abbeel, Ng (2004)\n\n Parameters\n ----------\n W : weight\n\n Returns\n -------\n TODO\n - think whether to do s, a or just s\n\n \"\"\"\n mu_e = self._mu_expert\n mu_im1 = mu_list[-1]\n mu_bar_im2 = mu_bar_list[-1]\n\n if len(mu_bar_list) == 1:\n mu_bar_im1 = mu_list[-1]\n w_i = mu_e - mu_im1\n else:\n a = mu_im1 - mu_bar_im2\n b = mu_e - mu_bar_im2\n mu_bar_im1 = (mu_bar_im2 + a.T.dot(b) / norm(a)**2) * a\n w_i = mu_e - mu_bar_im1\n\n w_i /= np.linalg.norm(w_i, 2)\n t_i = np.linalg.norm(w_i, 2)\n\n margin_v = w_i.T.dot(mu_e - mu_bar_im1)\n margin_mu = t_i\n\n converged = margin_mu <= self._irl_precision\n return w_i, (margin_v, margin_mu, converged, mu_bar_im1)\n\n#\n# def train_mma(pi_0, phi_sa_dim, task_desc, params, D, evaluator, ob_space=None, ac_space=None):\n# gym.logger.setLevel(logging.WARN)\n#\n# gamma = task_desc[\"gamma\"]\n# horizon = task_desc[\"horizon\"]\n# eps = params[\"eps\"]\n# p = q = phi_sa_dim # adding action dim\n# phi = D[\"phi_fn\"]\n# phi_s = D[\"phi_fn_s\"]\n# stochastic = True\n# mu_estimator_type = params[\"mu_estimator\"]\n# n_action = task_desc[\"n_action\"]\n# assert isinstance(n_action, int)\n# action_list = range(n_action)\n# precision = params[\"precision\"]\n#\n# mu_exp_estimator = EmpiricalMuEstimator(phi, gamma)\n# mu_exp_estimator.fit(D, stochastic, return_s_init=True)\n# mu_exp, s_init_list = mu_exp_estimator.estimate()\n#\n#\n# logging.info(\"fitting {}\".format(mu_estimator_type))\n# if task_desc[\"type\"] == \"gym\":\n# env = gym.make(task_desc[\"env_id\"])\n# ac_space = env.action_space\n# ob_space = env.observation_space\n# mu_dim = p # only for discrete action\n# elif task_desc[\"type\"] == \"sepsis\":\n# if ac_space is None:\n# ac_space = (5, )\n# if ob_space is None:\n# ob_space = (46, )\n# mu_dim = p\n#\n# stochastic = True\n#\n# s = D[\"s\"]\n# a = D[\"a\"]\n# if len(a.shape) == 1:\n# a = np.expand_dims(a, axis=1)\n# s_next = D[\"s_next\"]\n# done = D[\"done\"]\n# if len(done.shape) == 1:\n# done = 
np.expand_dims(done, axis=1)\n# phi_sa = D[\"phi_sa\"]\n#\n# n_transition = D[\"s\"].shape[0]\n# idx = idx = int(n_transition * 0.7)\n#\n# D_train = {\"s\" : s[:idx, :],\n# \"a\" : a[:idx, :],\n# \"phi_sa\" : phi_sa[:idx, :],\n# \"s_next\": s_next[:idx, :],\n# \"done\": done[:idx, :]}\n#\n# D_val = {\"s\" : s[idx:, :],\n# \"a\" : a[idx:, :],\n# \"phi_sa\" : phi_sa[idx:, :],\n# \"s_next\": s_next[idx:, :],\n# \"done\": done[idx:, :]}\n#\n#\n# if mu_estimator_type == \"lstd\":\n# mu_estimator = LSTDMuEstimator(phi, gamma, D, p, q, eps, s_init_list)\n# elif mu_estimator_type == \"dsfn\":\n# mu_estimator = DeepMuEstimator(phi, gamma, D_train, D_val, s_init_list, ob_space,\n# ac_space, mu_dim, horizon)\n# else:\n# raise NotImplementedError\n#\n# mdp_solver = DQNSepsis(D=D_train)\n#\n# mma = MaxMarginAbbeel(pi_init=pi_0,\n# p=p,\n# phi=phi,\n# mu_exp=mu_exp,\n# mdp_solver=mdp_solver,\n# evaluator=evaluator,\n# irl_precision=params[\"precision\"],\n# method=params[\"method\"],\n# mu_estimator=mu_estimator,\n# stochastic=stochastic,\n# D_val=D_val)\n#\n# results = mma.run(n_iteration=params[\"n_iteration\"])\n# return results\n\n\n",
"\"\"\"\nDeep Reinforcement Learning: Deep Q-network (DQN)\nThis example is based on https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-\nSecond-Edition/blob/master/Chapter06/02_dqn_pong.py\nThe template illustrates using Lightning for Reinforcement Learning. The example builds a basic DQN using the\nclassic CartPole environment.\nTo run the template just run:\npython reinforce_learn_Qnet.py\nAfter ~1500 steps, you will see the total_reward hitting the max score of 200. Open up TensorBoard to\nsee the metrics:\ntensorboard --logdir default\n\"\"\"\n\nimport argparse\n\nfrom torch.utils.data import DataLoader\nfrom ...sepsis_simulator.dataset import SepsisExpertDataset\nfrom .base import Base_AIRLLightning, Base_AIRL_NODEGAM_Lightning\nfrom .disc import FCNN_Disc\n\n\nclass SepsisMixin(object):\n monitor_metric = 'val_a'\n monitor_mode = 'max'\n\n def _dataloader(self, split='train') -> DataLoader:\n \"\"\"Initialize the Replay Buffer dataset used for retrieving experiences\"\"\"\n\n dataset = SepsisExpertDataset(\n mdp=self.hparams.mdp,\n N=self.hparams.N,\n gamma=self.hparams.gamma,\n split=split,\n val_ratio=0.2,\n expert_pol=self.hparams.expert_pol,\n )\n dataloader = DataLoader(dataset=dataset,\n batch_size=self.hparams.batch_size,\n )\n return dataloader\n\n def train_dataloader(self) -> DataLoader:\n return self._dataloader('train')\n\n def val_dataloader(self) -> DataLoader:\n return self._dataloader('val')\n\n def test_dataloader(self) -> DataLoader:\n return self._dataloader('test')\n\n @classmethod\n def get_rs_loader(cls, args, rs=None):\n rs = super().get_rs_loader(args, rs=rs)\n\n # rs.add_rs_hparams('seed', short_name='s', chose_from=[321])\n rs.add_rs_hparams('seed', short_name='s', gen=lambda hparams: rs.np_gen.randint(200))\n rs.add_rs_hparams('batch_size', short_name='bs', chose_from=[512])\n rs.add_rs_hparams('noise', short_name='dn', chose_from=[0., 0.1])\n rs.add_rs_hparams('noise_epochs', short_name='dns',\n gen=lambda 
hparams: rs.np_gen.choice([0.1, 0.2]) if hparams.noise > 0 else 0)\n return rs\n\n @classmethod\n def add_model_specific_args(cls, parser) -> argparse.ArgumentParser:\n \"\"\"\n Adds arguments for DQN model\n Note: these params are fine tuned for Pong env\n Args:\n parent\n \"\"\"\n # Model\n parser.add_argument('--batch_size', type=int, default=512)\n parser.add_argument('--gamma', type=float, default=0.9,\n help='Decay rate in RL. Set it to 0.9 to encourage treating patients '\n 'earlier to leave the hospitals.')\n parser.add_argument('--noise', type=float, default=0.1)\n parser.add_argument('--noise_epochs', type=float, default=0.4)\n # Environment\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--patience', type=int, default=30)\n parser.add_argument('--mdp', type=str, choices=['original', 'gam', 'linear', 'cgam', 'clinear',\n 'cogam', 'colinear'],\n default='gam', help='How to generate reward.')\n parser.add_argument('--fold', type=int, default=0)\n parser.add_argument('--model_gamma', type=float, default=None,\n help='The gamma of the model. If None, same as gamma')\n parser.add_argument('--N', type=int, default=5000,\n help='Number of samples generated')\n parser.add_argument('--expert_pol', type=str, default='optimal',\n choices=['optimal', 'eps0.07', 'eps0.14'])\n parser = super().add_model_specific_args(parser)\n return parser\n\n def trainer_args(self):\n return dict()\n\n\nclass AIRLLightning(SepsisMixin, Base_AIRLLightning):\n pass\n\n\nclass AIRL_NODEGAM_Lightning(SepsisMixin, Base_AIRL_NODEGAM_Lightning):\n pass\n\n\nclass AIRL_FCNN_Lightning(SepsisMixin, Base_AIRLLightning):\n disc_model_cls = FCNN_Disc\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nFile to build out a cohort using the ADMISSIONS, ICUSTAYS, and PATIENTS tables in raw mimic data.\n\n@author: josephfutoma\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom datetime import datetime\nimport pickle\nfrom data_load_utils import my_readcsv\n\nfrom time import time\n\nPATH_TO_REPO = \"./\"\n\n### options for building out cohort.\nlimit_first_ICU = True # do we limit to first ICU stay if there are several in an admission?\nlimit_first_admission = False #limit to first admission only?\nage_thresh = 18 #min age to be included\nLOS_lower_thresh = 12#LOS must be at least this long to be included; exclude very short stays\n\n# load in stuff\nmv_icuids_dat = my_readcsv(PATH_TO_REPO+'query-data/mv_icuids.csv')\n\n#may be many icu stays for one admission\nicustays_dat = my_readcsv(PATH_TO_REPO+'raw-data/ICUSTAYS.csv',\n\t\tusecols=['SUBJECT_ID','HADM_ID','ICUSTAY_ID','FIRST_CAREUNIT',\n\t\t'INTIME','OUTTIME'])\n\n#may be many admissions for one patient\nadmissions_dat = my_readcsv(PATH_TO_REPO+'raw-data/ADMISSIONS.csv',\n\tusecols=['SUBJECT_ID','HADM_ID','ADMITTIME','DISCHTIME','DEATHTIME','HOSPITAL_EXPIRE_FLAG',\n\t'ADMISSION_TYPE','ADMISSION_LOCATION','DISCHARGE_LOCATION',\n\t'INSURANCE','LANGUAGE','RELIGION','MARITAL_STATUS','ETHNICITY'])\n\npatients_dat = my_readcsv(PATH_TO_REPO+'raw-data/PATIENTS.csv',\n\tusecols=['SUBJECT_ID','GENDER','DOB','DOD','DOD_HOSP','DOD_SSN'])\n\n### merge these all together\ncohort_dat = patients_dat.merge(admissions_dat,'inner',on='SUBJECT_ID')\ncohort_dat = cohort_dat.merge(icustays_dat,'inner',on=['SUBJECT_ID','HADM_ID'])\ncohort_dat = mv_icuids_dat.merge(cohort_dat,'left',on='ICUSTAY_ID')\n\n# NOTE: not entirely sure what the difference is between these death dates...\n# mimic documentation claims HOSPITAL_EXPIRE_FLAG marks in-hospital mortality, not \n# sure what concordance among these deaths is...\ncohort_dat = 
cohort_dat.loc[:,['ICUSTAY_ID','HADM_ID','SUBJECT_ID',\n\t'INTIME','OUTTIME','ADMITTIME','DISCHTIME','DOB','DOD','DOD_HOSP',\n\t'DOD_SSN','DEATHTIME','HOSPITAL_EXPIRE_FLAG','GENDER','ADMISSION_TYPE','ADMISSION_LOCATION',\n\t'DISCHARGE_LOCATION','INSURANCE','LANGUAGE','RELIGION','MARITAL_STATUS',\n\t'ETHNICITY','FIRST_CAREUNIT']]\n\ncohort_dat['INTIME'] = pd.to_datetime(cohort_dat['INTIME'])\ncohort_dat['OUTTIME'] = pd.to_datetime(cohort_dat['OUTTIME'])\ncohort_dat['ADMITTIME'] = pd.to_datetime(cohort_dat['ADMITTIME'])\ncohort_dat['DISCHTIME'] = pd.to_datetime(cohort_dat['DISCHTIME'])\ncohort_dat['DOB'] = pd.to_datetime(cohort_dat['DOB'])\ncohort_dat['DOD'] = pd.to_datetime(cohort_dat['DOD'])\ncohort_dat['DOD_HOSP'] = pd.to_datetime(cohort_dat['DOD_HOSP'])\ncohort_dat['DOD_SSN'] = pd.to_datetime(cohort_dat['DOD_SSN'])\ncohort_dat['DEATHTIME'] = pd.to_datetime(cohort_dat['DEATHTIME'])\ncohort_dat['LOS'] = (cohort_dat['OUTTIME']-cohort_dat['INTIME']).dt.total_seconds()/60/60 #in hrs\n#get age in years...super janky, i will never figure out datetimes...\ncohort_dat['AGE'] = [x.total_seconds()/60/60/24/365.2422 for x in (np.array(cohort_dat['ADMITTIME'].dt.date) - np.array(cohort_dat['DOB'].dt.date))] \n\ncohort_dat = cohort_dat.sort_values(by=[\"ICUSTAY_ID\"]) \n\n# should be 23,386 ICU stays so far\n\n#get the ICU with earliest INTIME for each HADM_ID\nif limit_first_ICU:\n\tfirst_icu_stays_dat = cohort_dat.loc[:,['HADM_ID','INTIME']].groupby(['HADM_ID']).min().reset_index()\n\tcohort_dat = cohort_dat.merge(first_icu_stays_dat,'right',on=['HADM_ID','INTIME'])\n\t#if doing this, should be at 21,876 now\n\nif limit_first_admission:\n\tfirst_adm_dat = cohort_dat.loc[:,['SUBJECT_ID','ADMITTIME']].groupby(['SUBJECT_ID']).min().reset_index()\n\tcohort_dat = cohort_dat.merge(first_adm_dat,'right',on=['SUBJECT_ID','ADMITTIME'])\n\t#if doing this and also did first ICU stay, should be at 17,678 now.\n\t#however, i'm not sure how necessary it is to do this....ask 
Leo...?\n\t#TODO may be worth doing some EDA here\n\n\n#FILTER TO ADULTS \ncohort_dat = cohort_dat.loc[cohort_dat['AGE']>=age_thresh,:] \n\n#FILTER ON LOS \ncohort_dat = cohort_dat.loc[cohort_dat['LOS']>=LOS_lower_thresh,:]\n\n#should be at 21583 ICU stays, if limit to first ICU stay, do not limit to first admission, age>=18, LOS>=12\n\ncohort_dat.to_csv(PATH_TO_REPO+\"query-data/cohort.csv\",index=False)\n"
] | [
[
"numpy.abs",
"numpy.min",
"numpy.linalg.norm",
"numpy.array",
"numpy.sum"
],
[
"torch.utils.data.DataLoader"
],
[
"numpy.array",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
HackaGeo2021UFF/well-tie-challenge | [
"63af3d9b4cc63e78f9ec31ee3d0e2b231e65b195"
] | [
"src/wellTie.py"
] | [
"import json\nimport welly\nfrom welly import Well\nimport pandas as pd\nimport lasio\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nfrom src.waveletChoice import *\nfrom src.seismicManipulation import *\n\ndef read_inputs(jpath):\n \"\"\"\n read_inputs reads the input json file and stores it information in a dictionary\n\n Parameters\n ----------\n jpath : string\n the input JSON file\n\n Returns\n -------\n paths: dict\n Returns a dictionary of the json file\n\n \"\"\"\n with open(jpath) as file:\n paths = json.load(file)\n return paths\n\ndef read_data(ui):\n \"\"\"\n read_data reads the input data and stores it in a dictionary\n\n Parameters\n ----------\n ui: dict\n A dictionary of the user inputs\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n \n # read well .las\n well = Well.from_las(ui['well'])\n ui['uwi'] = well.header['uwi']\n ui['well_name'] = well.header['name']\n\n # read cube seismic\n\n # dado do desafio, usar somente no ambiente remoto\n tr_seis, t_seis = seismic_trace = extract_seismic_trace(ui['well'], ui['seismic'])\n t_seis = t_seis/1e3\n\n # dado de exemplo, pode usar na máquina pessoal\n #df = pd.read_csv(ui['seismic'])\n #tr_seis, t_seis = df.cdp409.to_numpy() , df.time.to_numpy()\n \n seismic = pd.DataFrame({'t':t_seis, 'tr_synth':np.zeros(len(tr_seis)), 'tr_seis':tr_seis})\n\n # read wavelet\n if ui['wavelet'] == \"\":\n wavelet = None\n else: \n # wavelet = pd.read.csv(ui['wavelet']) \n wavelet = None\n\n data = {'well':well,'seismic':seismic, 'wavelet':wavelet}\n return data\n\ndef pre_processing_data(data):\n \"\"\"\n pre_processing_data pre-process the well DT and RHOB data with operations as:\n * despike\n * smooth\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used 
throughout the code\n\n \"\"\"\n \n data['well'].data['DT'] = np.nan_to_num(data['well'].data['DT'])\n data['well'].data['RHOB'] = np.nan_to_num(data['well'].data['RHOB-EDIT'])\n #data['well'].data['RHOB'] = np.nan_to_num(data['well'].data['RHOB'])\n\n\n #unit convert to µs/m\n data['well'].data['DT'] = data['well'].data['DT'] / 0.3048 \n #unit convert to kg/m3 \n #data['well'].data['RHOB'] = data['well'].data['RHOB-EDIT'] * 1000\n data['well'].data['RHOB'] = data['well'].data['RHOB'] * 1000\n\n #Despiking\n #Sonic Despiking\n dt = data['well'].data['DT']\n data['well'].data['DT_DS'] = dt.despike(window_length=50, z=2)\n\n #Density Despiking\n den = data['well'].data['RHOB']\n data['well'].data['RHOB_DS'] = den.despike(window_length=50, z=2)\n\n #Smoothing \n #Sonic Smoothing\n dt_ds = data['well'].data['DT_DS']\n data['well'].data['DT_DS_SM'] = dt_ds.smooth(window_length=10, samples=False)\n\n #Density Smoothing\n den_ds = data['well'].data['RHOB_DS']\n data['well'].data['RHOB_DS_SM'] = den_ds.smooth(window_length=10, samples=False)\n data['well'] = data['well'].df()\n return data\n\ndef time_depth_relationship(data, ui):\n \"\"\"\n time_depth_relationship creates the time-depth relationship from the sonic (DT) log\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n ui: dict\n Returns a dictionary of the user inputs\n\n \"\"\"\n ### just an exemple\n ### TO DO: become smart\n log_start = data['well'].index[0] # Depth of logging starts(m) from header\n kb = ui['kb'] # Kelly Bushing elevation(m) from header\n gap_int = log_start - kb\n v_water = 1500\n t_water_botton = ui['t_water_botton']\n log_start_time = t_water_botton + 2*(log_start - v_water*t_water_botton/2)*(np.array(data['well']['DT'])[0]/1e6) \n\n #first replace NaN values with zero\n dt = data['well']['DT']\n dt_iterval = dt * 0.1524 / 1e6\n t_cum = np.cumsum(dt_iterval) * 2\n data['well']['TWT'] = t_cum + 
log_start_time\n return data\n\ndef ai(data):\n \"\"\"\n ai creates the accoustic impedance log\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n # Sonic velocity calculate\n Vsonic = []\n for value in data['well']['DT_DS_SM']:\n if value == 0:\n Vsonic.append(0)\n else:\n Vsonic.append(1e6/value)\n\n data['well']['Vsonic'] = np.array(Vsonic) #(unit: m/s)\n # AI calculate\n data['well']['AI'] = data['well']['Vsonic'] * data['well']['RHOB_DS_SM'] #(unit: kg/m2.s)\n return data\n\ndef rc_time(data):\n \"\"\"\n rc_time creates the Reflectivity Coefficients log in the time-domain\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n AI_tdom = np.interp(x=data['seismic']['t'].to_numpy(), xp = data['well'].TWT.to_numpy(), fp = data['well'].AI.to_numpy()) #resampling\n\n # again Rc calulation but in reampled time domain\n Rc_tdom = np.zeros(len(AI_tdom))\n for i in range(len(AI_tdom)-1):\n dem = AI_tdom[i]+AI_tdom[i+1]\n if dem == 0:\n Rc_tdom[i] = 0\n else:\n Rc_tdom[i] = (AI_tdom[i+1]-AI_tdom[i])/dem\n # to adjust vector size copy the last element to the tail\n Rc_tdom[-1] = Rc_tdom[-2]\n \n i = 0\n while Rc_tdom[i] == 0 and i < len(Rc_tdom):\n i += 1\n Rc_tdom[i] = Rc_tdom[i+1]\n\n i = len(Rc_tdom)-1\n while Rc_tdom[i] == 0 and i > 0:\n i -= 1\n Rc_tdom[i] = Rc_tdom[i-1]\n \n data['well_tdom'] = pd.DataFrame()\n data['well_tdom']['t'] = data['seismic']['t']\n data['well_tdom']['Rc_tdom'] = Rc_tdom\n data['well_tdom']['AI_tdom'] = AI_tdom\n\n return data\n\ndef synthetic_seismogram(data):\n \"\"\"\n synthetic_seismogram creates the synthetic seismogram\n\n Parameters\n 
----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n\n if data['wavelet'] == None:\n cc, freq, roll, phase = best_wavelet(data)\n w = ricker(freq, phase, data)\n else:\n w = data['wavelet']\n \n Rc_tdom = np.roll(data['well_tdom']['Rc_tdom'], roll)\n data['seismic']['tr_synth'] = np.convolve(w, Rc_tdom, mode='same')\n return data\n\ndef normalization(data):\n \"\"\"\n normalization normalizes the synthetic and seismic signals\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n data['seismic']['tr_synth'] = data['seismic']['tr_synth']/np.max(data['seismic']['tr_synth'])\n data['seismic']['tr_seis'] = data['seismic']['tr_seis']/np.max(data['seismic']['tr_seis'])\n return data\n\ndef export_data(data, ui):\n \"\"\"\n export_data exports data in the Decision Workspace format\n\n Parameters\n ----------\n data: dict\n A dictionary containing all the data that will be used throughout the code\n ui: dict\n A dictionary of the user inputs \n\n Returns\n -------\n data: dict\n Returns a dictionary containing all the data that will be used throughout the code\n\n \"\"\"\n \n if 'outputs' not in os.listdir():\n os.mkdir('outputs')\n\n result_path = ui['well_name'].strip().replace(\"/\",\"_\")\n\n twt = data['well']['TWT'].to_numpy()*1000\n twt = np.insert(twt, 0, 0)\n depth = data['well'].index.to_numpy()\n depth = np.insert(depth, 0, 0)\n\n t = data['seismic']['t'].to_numpy()*1000\n new_depth = np.interp(t,twt,depth)\n amp = data['seismic']['tr_synth'].to_numpy()\n\n with open('outputs/'+result_path+'_TD.dat','w') as file:\n file.write('TDP1 '+ ui['uwi'] + '\\n')\n file.write('TDP2 ' + ui['well_name'] 
+ '\\n')\n \n line = 'TDP3 ' + ui['td_name'] + ' '*70\n line = line[:73]\n line += ' 0 TVDBTDD\\n'\n file.write(line)\n \n n = len(t)\n for i in range(n):\n line = 'TDP5 %.6f '%t[i]\n line = line[:21] \n line += '%.5f\\n'%new_depth[i]\n file.write(line)\n \n with open('outputs/'+result_path+'_synth.dat','w') as file:\n file.write('SYN1 '+ ui['uwi'] + '\\n')\n file.write('SYN2 ' + ui['well_name'] + '\\n')\n\n line = 'SYN3 ' + ui['synth_name'] + ' '*30\n line = line[:70]\n line += '4.0\\n'\n file.write(line)\n \n \n n = len(t)\n for i in range(n):\n line = 'SYN7 %.6f '%t[i]\n line = line[:28] \n line += '%.6f\\n'%amp[i]\n file.write(line)\n\n return None\n"
] | [
[
"numpy.convolve",
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.cumsum",
"numpy.max",
"numpy.insert",
"numpy.interp",
"numpy.array",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Aniruddha-Tapas/seq2seq | [
"1592b842b652ae648b96c164bead38eb089ce08e",
"1592b842b652ae648b96c164bead38eb089ce08e"
] | [
"seq2seq/contrib/seq2seq/helper.py",
"seq2seq/encoders/rnn_encoder.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nIMPORTANT: This code is taken directly from Tensorflow\n(https://github.com/tensorflow/tensorflow) and is copied temporarily\nuntil it is available in a packaged Tensorflow version on pypi.\n\nTODO(dennybritz): Delete this code when it becomes available in TF.\n\nA library of helpers for use with SamplingDecoders.\n\"\"\"\n\n# pylint: skip-file\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.contrib.distributions.python.ops import categorical\n# from tensorflow.contrib.seq2seq.python.ops import decoder\nfrom seq2seq.contrib.seq2seq import decoder\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\n\n__all__ = [\n \"Helper\",\n \"TrainingHelper\",\n \"GreedyEmbeddingHelper\",\n \"CustomHelper\",\n \"ScheduledEmbeddingTrainingHelper\",\n]\n\n_transpose_batch_time = decoder._transpose_batch_time 
# pylint: disable=protected-access\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Helper(object):\n \"\"\"Helper interface. Helper instances are used by SamplingDecoder.\"\"\"\n\n @abc.abstractproperty\n def batch_size(self):\n \"\"\"Returns a scalar int32 tensor.\"\"\"\n raise NotImplementedError(\"batch_size has not been implemented\")\n\n @abc.abstractmethod\n def initialize(self, name=None):\n \"\"\"Returns `(initial_finished, initial_inputs)`.\"\"\"\n pass\n\n @abc.abstractmethod\n def sample(self, time, outputs, state, name=None):\n \"\"\"Returns `sample_ids`.\"\"\"\n pass\n\n @abc.abstractmethod\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n \"\"\"Returns `(finished, next_inputs, next_state)`.\"\"\"\n pass\n\n\nclass CustomHelper(Helper):\n \"\"\"Base abstract class that allows the user to customize sampling.\"\"\"\n\n def __init__(self, initialize_fn, sample_fn, next_inputs_fn):\n \"\"\"Initializer.\n\n Args:\n initialize_fn: callable that returns `(finished, next_inputs)`\n for the first iteration.\n sample_fn: callable that takes `(time, outputs, state)`\n and emits tensor `sample_ids`.\n next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`\n and emits `(finished, next_inputs, next_state)`.\n \"\"\"\n self._initialize_fn = initialize_fn\n self._sample_fn = sample_fn\n self._next_inputs_fn = next_inputs_fn\n self._batch_size = None\n\n @property\n def batch_size(self):\n if self._batch_size is None:\n raise ValueError(\"batch_size accessed before initialize was called\")\n return self._batch_size\n\n def initialize(self, name=None):\n with ops.name_scope(name, \"%sInitialize\" % type(self).__name__):\n (finished, next_inputs) = self._initialize_fn()\n if self._batch_size is None:\n self._batch_size = array_ops.size(finished)\n return (finished, next_inputs)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(name, \"%sSample\" % type(self).__name__,\n (time, outputs, state)):\n 
return self._sample_fn(time=time, outputs=outputs, state=state)\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(name, \"%sNextInputs\" % type(self).__name__,\n (time, outputs, state)):\n return self._next_inputs_fn(\n time=time, outputs=outputs, state=state, sample_ids=sample_ids)\n\n\nclass TrainingHelper(Helper):\n \"\"\"A helper for use during training. Only reads inputs.\n\n Returned sample_ids are the argmax of the RNN output logits.\n \"\"\"\n\n def __init__(self, inputs, sequence_length, time_major=False, name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure of) input tensors.\n sequence_length: An int32 vector tensor.\n time_major: Python bool. Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sequence_length` is not a 1D tensor.\n \"\"\"\n with ops.name_scope(name, \"TrainingHelper\", [inputs, sequence_length]):\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\n if not time_major:\n inputs = nest.map_structure(_transpose_batch_time, inputs)\n\n def _unstack_ta(inp):\n return tensor_array_ops.TensorArray(\n dtype=inp.dtype,\n size=array_ops.shape(inp)[0],\n element_shape=inp.get_shape()[1:]).unstack(inp)\n\n self._input_tas = nest.map_structure(_unstack_ta, inputs)\n self._sequence_length = ops.convert_to_tensor(\n sequence_length, name=\"sequence_length\")\n if self._sequence_length.get_shape().ndims != 1:\n raise ValueError(\n \"Expected sequence_length to be a vector, but received shape: %s\" %\n self._sequence_length.get_shape())\n\n self._zero_inputs = nest.map_structure(\n lambda inp: array_ops.zeros_like(inp[0, :]), inputs)\n\n self._batch_size = array_ops.size(sequence_length)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n def initialize(self, name=None):\n with ops.name_scope(name, \"TrainingHelperInitialize\"):\n finished = 
math_ops.equal(0, self._sequence_length)\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: self._zero_inputs,\n lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))\n return (finished, next_inputs)\n\n def sample(self, time, outputs, name=None, **unused_kwargs):\n with ops.name_scope(name, \"TrainingHelperSample\", [time, outputs]):\n sample_ids = math_ops.cast(\n math_ops.argmax(\n outputs, axis=-1), dtypes.int32)\n return sample_ids\n\n def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):\n \"\"\"next_inputs_fn for TrainingHelper.\"\"\"\n with ops.name_scope(name, \"TrainingHelperNextInputs\",\n [time, outputs, state]):\n next_time = time + 1\n finished = (next_time >= self._sequence_length)\n all_finished = math_ops.reduce_all(finished)\n\n def read_from_ta(inp):\n return inp.read(next_time)\n\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: self._zero_inputs,\n lambda: nest.map_structure(read_from_ta, self._input_tas))\n return (finished, next_inputs, state)\n\n\nclass ScheduledEmbeddingTrainingHelper(TrainingHelper):\n \"\"\"A training helper that adds scheduled sampling.\n\n Returns -1s for sample_ids where no sampling took place; valid sample id\n values elsewhere.\n \"\"\"\n\n def __init__(self,\n inputs,\n sequence_length,\n embedding,\n sampling_probability,\n time_major=False,\n seed=None,\n scheduling_seed=None,\n name=None):\n \"\"\"Initializer.\n\n Args:\n inputs: A (structure of) input tensors.\n sequence_length: An int32 vector tensor.\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`.\n sampling_probability: A 0D `float32` tensor: the probability of sampling\n categorically from the output ids instead of reading directly from the\n inputs.\n time_major: Python bool. 
Whether the tensors in `inputs` are time major.\n If `False` (default), they are assumed to be batch major.\n seed: The sampling seed.\n scheduling_seed: The schedule decision rule sampling seed.\n name: Name scope for any created operations.\n\n Raises:\n ValueError: if `sampling_probability` is not a scalar or vector.\n \"\"\"\n with ops.name_scope(name, \"ScheduledEmbeddingSamplingWrapper\",\n [embedding, sampling_probability]):\n if callable(embedding):\n self._embedding_fn = embedding\n else:\n self._embedding_fn = (\n lambda ids: embedding_ops.embedding_lookup(embedding, ids))\n self._sampling_probability = ops.convert_to_tensor(\n sampling_probability, name=\"sampling_probability\")\n if self._sampling_probability.get_shape().ndims not in (0, 1):\n raise ValueError(\n \"sampling_probability must be either a scalar or a vector. \"\n \"saw shape: %s\" % (self._sampling_probability.get_shape()))\n self._seed = seed\n self._scheduling_seed = scheduling_seed\n super(ScheduledEmbeddingTrainingHelper, self).__init__(\n inputs=inputs,\n sequence_length=sequence_length,\n time_major=time_major,\n name=name)\n\n def initialize(self, name=None):\n return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)\n\n def sample(self, time, outputs, state, name=None):\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperSample\",\n [time, outputs, state]):\n # Return -1s where we did not sample, and sample_ids elsewhere\n select_sample_noise = random_ops.random_uniform(\n [self.batch_size], seed=self._scheduling_seed)\n select_sample = (self._sampling_probability > select_sample_noise)\n sample_id_sampler = categorical.Categorical(logits=outputs)\n return array_ops.where(\n select_sample,\n sample_id_sampler.sample(seed=self._seed),\n array_ops.tile([-1], [self.batch_size]))\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n with ops.name_scope(name, \"ScheduledEmbeddingTrainingHelperSample\",\n [time, outputs, state, 
sample_ids]):\n (finished, base_next_inputs, state) = (\n super(ScheduledEmbeddingTrainingHelper, self).next_inputs(\n time=time,\n outputs=outputs,\n state=state,\n sample_ids=sample_ids,\n name=name))\n\n def maybe_sample():\n \"\"\"Perform scheduled sampling.\"\"\"\n where_sampling = math_ops.cast(\n array_ops.where(sample_ids > -1), dtypes.int32)\n where_not_sampling = math_ops.cast(\n array_ops.where(sample_ids <= -1), dtypes.int32)\n where_sampling_flat = array_ops.reshape(where_sampling, [-1])\n where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])\n sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)\n inputs_not_sampling = array_ops.gather(base_next_inputs,\n where_not_sampling_flat)\n sampled_next_inputs = self._embedding_fn(sample_ids_sampling)\n base_shape = array_ops.shape(base_next_inputs)\n return (array_ops.scatter_nd(\n indices=where_sampling,\n updates=sampled_next_inputs,\n shape=base_shape) + array_ops.scatter_nd(\n indices=where_not_sampling,\n updates=inputs_not_sampling,\n shape=base_shape))\n\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished, lambda: base_next_inputs, maybe_sample)\n return (finished, next_inputs, state)\n\n\nclass GreedyEmbeddingHelper(Helper):\n \"\"\"A helper for use during inference.\n\n Uses the argmax of the output (treated as logits) and passes the\n result through an embedding layer to get the next input.\n \"\"\"\n\n def __init__(self, embedding, start_tokens, end_token):\n \"\"\"Initializer.\n\n Args:\n embedding: A callable that takes a vector tensor of `ids` (argmax ids),\n or the `params` argument for `embedding_lookup`.\n start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.\n end_token: `int32` scalar, the token that marks end of decoding.\n\n Raises:\n ValueError: if `sequence_length` is not a 1D tensor.\n \"\"\"\n if callable(embedding):\n self._embedding_fn = embedding\n else:\n self._embedding_fn = (\n 
lambda ids: embedding_ops.embedding_lookup(embedding, ids))\n\n self._start_tokens = ops.convert_to_tensor(\n start_tokens, dtype=dtypes.int32, name=\"start_tokens\")\n self._end_token = ops.convert_to_tensor(\n end_token, dtype=dtypes.int32, name=\"end_token\")\n if self._start_tokens.get_shape().ndims != 1:\n raise ValueError(\"start_tokens must be a vector\")\n self._batch_size = array_ops.size(start_tokens)\n if self._end_token.get_shape().ndims != 0:\n raise ValueError(\"end_token must be a scalar\")\n self._start_inputs = self._embedding_fn(self._start_tokens)\n\n @property\n def batch_size(self):\n return self._batch_size\n\n def initialize(self, name=None):\n finished = array_ops.tile([False], [self._batch_size])\n return (finished, self._start_inputs)\n\n def sample(self, time, outputs, state, name=None):\n \"\"\"sample for GreedyEmbeddingHelper.\"\"\"\n del time, state # unused by sample_fn\n # Outputs are logits, use argmax to get the most probable id\n if not isinstance(outputs, ops.Tensor):\n raise TypeError(\"Expected outputs to be a single Tensor, got: %s\" %\n outputs)\n sample_ids = math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)\n return sample_ids\n\n def next_inputs(self, time, outputs, state, sample_ids, name=None):\n \"\"\"next_inputs_fn for GreedyEmbeddingHelper.\"\"\"\n del time, outputs # unused by next_inputs_fn\n finished = math_ops.equal(sample_ids, self._end_token)\n all_finished = math_ops.reduce_all(finished)\n next_inputs = control_flow_ops.cond(\n all_finished,\n # If we're finished, the next_inputs value doesn't matter\n lambda: self._start_inputs,\n lambda: self._embedding_fn(sample_ids))\n return (finished, next_inputs, state)\n",
"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCollection of RNN encoders.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn.python.ops import rnn\n\nfrom seq2seq.encoders.encoder import Encoder, EncoderOutput\nfrom seq2seq.training import utils as training_utils\n\n\ndef _unpack_cell(cell):\n \"\"\"Unpack the cells because the stack_bidirectional_dynamic_rnn\n expects a list of cells, one per layer.\"\"\"\n if isinstance(cell, tf.contrib.rnn.MultiRNNCell):\n return cell._cells #pylint: disable=W0212\n else:\n return [cell]\n\n\ndef _default_rnn_cell_params():\n \"\"\"Creates default parameters used by multiple RNN encoders.\n \"\"\"\n return {\n \"cell_class\": \"BasicLSTMCell\",\n \"cell_params\": {\n \"num_units\": 128\n },\n \"dropout_input_keep_prob\": 1.0,\n \"dropout_output_keep_prob\": 1.0,\n \"num_layers\": 1,\n \"residual_connections\": False,\n \"residual_combiner\": \"add\",\n \"residual_dense\": False\n }\n\n\ndef _toggle_dropout(cell_params, mode):\n \"\"\"Disables dropout during eval/inference mode\n \"\"\"\n cell_params = copy.deepcopy(cell_params)\n if mode != tf.contrib.learn.ModeKeys.TRAIN:\n cell_params[\"dropout_input_keep_prob\"] = 1.0\n cell_params[\"dropout_output_keep_prob\"] = 1.0\n return cell_params\n\n\nclass UnidirectionalRNNEncoder(Encoder):\n \"\"\"\n A 
unidirectional RNN encoder. Stacking should be performed as\n part of the cell.\n\n Args:\n cell: An instance of tf.contrib.rnn.RNNCell\n name: A name for the encoder\n \"\"\"\n\n def __init__(self, params, mode, name=\"forward_rnn_encoder\"):\n super(UnidirectionalRNNEncoder, self).__init__(params, mode, name)\n self.params[\"rnn_cell\"] = _toggle_dropout(self.params[\"rnn_cell\"], mode)\n\n @staticmethod\n def default_params():\n return {\"rnn_cell\": _default_rnn_cell_params()}\n\n def encode(self, inputs, sequence_length, **kwargs):\n cell = training_utils.get_rnn_cell(**self.params[\"rnn_cell\"])\n outputs, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=inputs,\n sequence_length=sequence_length,\n dtype=tf.float32,\n **kwargs)\n return EncoderOutput(\n outputs=outputs,\n final_state=state,\n attention_values=outputs,\n attention_values_length=sequence_length)\n\n\nclass BidirectionalRNNEncoder(Encoder):\n \"\"\"\n A bidirectional RNN encoder. Uses the same cell for both the\n forward and backward RNN. 
Stacking should be performed as part of\n the cell.\n\n Args:\n cell: An instance of tf.contrib.rnn.RNNCell\n name: A name for the encoder\n \"\"\"\n\n def __init__(self, params, mode, name=\"bidi_rnn_encoder\"):\n super(BidirectionalRNNEncoder, self).__init__(params, mode, name)\n self.params[\"rnn_cell\"] = _toggle_dropout(self.params[\"rnn_cell\"], mode)\n\n @staticmethod\n def default_params():\n return {\"rnn_cell\": _default_rnn_cell_params()}\n\n def encode(self, inputs, sequence_length, **kwargs):\n cell_fw = training_utils.get_rnn_cell(**self.params[\"rnn_cell\"])\n cell_bw = training_utils.get_rnn_cell(**self.params[\"rnn_cell\"])\n outputs, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=inputs,\n sequence_length=sequence_length,\n dtype=tf.float32,\n **kwargs)\n\n # Concatenate outputs and states of the forward and backward RNNs\n outputs_concat = tf.concat(outputs, 2)\n\n return EncoderOutput(\n outputs=outputs_concat,\n final_state=states,\n attention_values=outputs_concat,\n attention_values_length=sequence_length)\n\n\nclass StackBidirectionalRNNEncoder(Encoder):\n \"\"\"\n A stacked bidirectional RNN encoder. Uses the same cell for both the\n forward and backward RNN. 
Stacking should be performed as part of\n the cell.\n\n Args:\n cell: An instance of tf.contrib.rnn.RNNCell\n name: A name for the encoder\n \"\"\"\n\n def __init__(self, params, mode, name=\"stacked_bidi_rnn_encoder\"):\n super(StackBidirectionalRNNEncoder, self).__init__(params, mode, name)\n self.params[\"rnn_cell\"] = _toggle_dropout(self.params[\"rnn_cell\"], mode)\n\n @staticmethod\n def default_params():\n return {\"rnn_cell\": _default_rnn_cell_params()}\n\n def encode(self, inputs, sequence_length, **kwargs):\n cell_fw = training_utils.get_rnn_cell(**self.params[\"rnn_cell\"])\n cell_bw = training_utils.get_rnn_cell(**self.params[\"rnn_cell\"])\n\n cells_fw = _unpack_cell(cell_fw)\n cells_bw = _unpack_cell(cell_bw)\n\n result = rnn.stack_bidirectional_dynamic_rnn(\n cells_fw=cells_fw,\n cells_bw=cells_bw,\n inputs=inputs,\n dtype=tf.float32,\n sequence_length=sequence_length,\n **kwargs)\n outputs_concat, _output_state_fw, _output_state_bw = result\n final_state = (_output_state_fw, _output_state_bw)\n return EncoderOutput(\n outputs=outputs_concat,\n final_state=final_state,\n attention_values=outputs_concat,\n attention_values_length=sequence_length)\n"
] | [
[
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.contrib.distributions.python.ops.categorical.Categorical",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.array_ops.scatter_nd",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.random_ops.random_uniform"
],
[
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"tensorflow.contrib.rnn.python.ops.rnn.stack_bidirectional_dynamic_rnn",
"tensorflow.nn.bidirectional_dynamic_rnn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.0"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
starcatch1/book | [
"3d4477c2e624e291b2081b944c9589b976211dcf",
"3d4477c2e624e291b2081b944c9589b976211dcf"
] | [
"ch19/day06/07.py",
"ch15/12.py"
] | [
"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndef get_financial_statements(code):\n url = \"http://companyinfo.stock.naver.com/v1/company/ajax/cF1001.aspx?cmp_cd=%s&fin_typ=0&freq_typ=Y\" % (code)\n html = requests.get(url).text\n\n html = html.replace('<th class=\"bg r01c02 endLine line-bottom\"colspan=\"8\">연간</th>', \"\")\n html = html.replace(\"<span class='span-sub'>(IFRS연결)</span>\", \"\")\n html = html.replace(\"<span class='span-sub'>(IFRS별도)</span>\", \"\")\n html = html.replace(\"<span class='span-sub'>(GAAP개별)</span>\", \"\")\n html = html.replace('\\t', '')\n html = html.replace('\\n', '')\n html = html.replace('\\r', '')\n\n for year in range(2009, 2018):\n for month in range(6, 13):\n month = \"/%02d\" % month\n html = html.replace(str(year) + month, str(year))\n\n for month in range(1, 6):\n month = \"/%02d\" % month\n html = html.replace(str(year+1) + month, str(year))\n\n html = html.replace(str(year) + '(E)', str(year))\n\n df_list = pd.read_html(html, index_col='주요재무정보')\n df = df_list[0]\n return df\n\ndef get_3year_treasury():\n url = \"http://www.index.go.kr/strata/jsp/showStblGams3.jsp?stts_cd=288401&idx_cd=2884&freq=Y&period=1998:2016\"\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'lxml')\n tr_data = soup.find_all('tr', id='tr_288401_1')\n td_data = tr_data[0].find_all('td')\n\n treasury_3year = {}\n start_year = 1998\n\n for x in td_data:\n treasury_3year[start_year] = x.text\n start_year += 1\n\n print(treasury_3year)\n return treasury_3year\n\nif __name__ == \"__main__\":\n #df = get_financial_statements('035720')\n #print(df)\n get_3year_treasury()\n\n\n",
"import pandas_datareader.data as web\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.finance as matfin\n\nstart = datetime.datetime(2016, 3, 1)\nend = datetime.datetime(2016, 3, 31)\n\nskhynix = web.DataReader(\"000660.KS\", \"yahoo\", start, end)\nskhynix = skhynix[skhynix['Volume'] > 0]\n\nfig = plt.figure(figsize=(12, 8))\nax = fig.add_subplot(111)\n\nmatfin.candlestick2_ohlc(ax, skhynix['Open'], skhynix['High'], skhynix['Low'], skhynix['Close'],\n width=0.5, colorup='r', colordown='b')\nplt.show()"
] | [
[
"pandas.read_html"
],
[
"matplotlib.finance.candlestick2_ohlc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rpindale/pytorch | [
"6a085648d81ce88ff59d6d1438fdb3707a0d6fb7",
"6a085648d81ce88ff59d6d1438fdb3707a0d6fb7"
] | [
"test/quantization/core/test_workflow_module.py",
"test/distributed/elastic/multiprocessing/errors/api_test.py"
] | [
"# Torch\nimport torch\nfrom torch.quantization import (\n MinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAverageMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver,\n HistogramObserver,\n RecordingObserver,\n PlaceholderObserver,\n NoopObserver,\n FakeQuantize,\n FixedQParamsFakeQuantize,\n default_debug_qconfig,\n default_observer,\n default_histogram_observer,\n default_per_channel_weight_observer,\n get_observer_dict,\n prepare,\n QConfig,\n)\n\n\nimport torch.nn as nn\n\n# Standard library\nimport copy\nimport io\nimport itertools\nimport unittest\nimport math\nimport numpy as np\n\n# Testing utils\nfrom hypothesis import given, settings\nfrom hypothesis import strategies as st\nimport torch.testing._internal.hypothesis_utils as hu\nhu.assert_deadline_disabled()\nfrom torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA\nfrom torch.testing._internal.common_utils import TestCase\nfrom torch.testing._internal.common_quantization import (\n QuantizationTestCase,\n AnnotatedSingleLayerLinearModel,\n test_only_eval_fn,\n SingleLayerLinearModel,\n)\n\nfrom torch.testing._internal.common_quantized import (\n override_quantized_engine,\n supported_qengines,\n override_qengines,\n _fake_quantize_per_channel_affine_reference,\n _fake_quantize_per_channel_affine_grad_reference,\n to_tensor,\n)\n\nNP_RANDOM_SEED = 19\ntolerance = 1e-6\n\nclass TestObserver(QuantizationTestCase):\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),\n reduce_range=st.booleans())\n def test_per_tensor_observers(self, qdtype, qscheme, reduce_range):\n # reduce_range cannot be true for symmetric quantization with uint8\n if qdtype == torch.quint8 and qscheme == torch.per_tensor_symmetric:\n reduce_range = False\n ObserverList = [MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range),\n MovingAverageMinMaxObserver(averaging_constant=0.5,\n dtype=qdtype,\n 
qscheme=qscheme,\n reduce_range=reduce_range)]\n for myobs in ObserverList:\n # Calculate Qparams should return with a warning for observers with no data\n qparams = myobs.calculate_qparams()\n if type(myobs) == MinMaxObserver:\n x = torch.tensor([1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n y = torch.tensor([4.0, 5.0, 5.0, 6.0, 7.0, 8.0])\n else:\n # Moving average of min/max for x and y matches that of\n # extreme values for x/y used for minmax observer\n x = torch.tensor([0.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n y = torch.tensor([2.0, 5.0, 5.0, 6.0, 7.0, 10.0])\n\n result = myobs(x)\n result = myobs(y)\n self.assertEqual(result, y)\n self.assertEqual(myobs.min_val, 1.0)\n self.assertEqual(myobs.max_val, 8.0)\n qparams = myobs.calculate_qparams()\n if reduce_range:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.062745 * 255 / 127\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0313725 * 255 / 127\n ref_zero_point = -64 if qdtype is torch.qint8 else 0\n else:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.062745\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0313725\n ref_zero_point = -128 if qdtype is torch.qint8 else 0\n self.assertEqual(qparams[1].item(), ref_zero_point)\n self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)\n state_dict = myobs.state_dict()\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n loaded_obs = MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)\n loaded_obs.load_state_dict(loaded_dict)\n loaded_qparams = loaded_obs.calculate_qparams()\n self.assertEqual(myobs.min_val, loaded_obs.min_val)\n self.assertEqual(myobs.max_val, loaded_obs.max_val)\n self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())\n\n\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n 
qscheme=st.sampled_from((torch.per_channel_affine, torch.per_channel_symmetric, torch.per_channel_affine_float_qparams)),\n ch_axis=st.sampled_from((0, 1, 2, 3)), reduce_range=st.booleans())\n def test_per_channel_observers(self, qdtype, qscheme, ch_axis, reduce_range):\n # reduce_range cannot be true for symmetric quantization with uint8\n if qscheme == torch.per_channel_affine_float_qparams:\n reduce_range = False\n if qdtype == torch.quint8 and qscheme == torch.per_channel_symmetric:\n reduce_range = False\n ObserverList = [PerChannelMinMaxObserver(reduce_range=reduce_range,\n ch_axis=ch_axis,\n dtype=qdtype,\n qscheme=qscheme),\n MovingAveragePerChannelMinMaxObserver(averaging_constant=0.5,\n reduce_range=reduce_range,\n ch_axis=ch_axis,\n dtype=qdtype,\n qscheme=qscheme)]\n\n for myobs in ObserverList:\n # Calculate qparams should work for empty observers\n qparams = myobs.calculate_qparams()\n x = torch.tensor(\n [\n [[[1.0, 2.0], [2.0, 2.5]], [[3.0, 4.0], [4.5, 6.0]]],\n [[[-4.0, -3.0], [5.0, 5.0]], [[6.0, 3.0], [7.0, 8.0]]],\n ]\n )\n if type(myobs) == MovingAveragePerChannelMinMaxObserver:\n # Scaling the input tensor to model change in min/max values\n # across batches\n result = myobs(0.5 * x)\n result = myobs(1.5 * x)\n self.assertEqual(result, 1.5 * x)\n else:\n result = myobs(x)\n self.assertEqual(result, x)\n\n qparams = myobs.calculate_qparams()\n ref_min_vals = [[1.0, -4.0], [-4.0, 3.0], [-4.0, 2.0], [-4.0, -3.0]]\n ref_max_vals = [[6.0, 8.0], [5.0, 8.0], [6.0, 8.0], [7.0, 8.0]]\n per_channel_symmetric_ref_scales = [\n [0.04705882, 0.06274509],\n [0.03921569, 0.0627451],\n [0.04705882, 0.0627451],\n [0.05490196, 0.0627451],\n ]\n per_channel_affine_ref_scales = [\n [0.02352941, 0.04705882],\n [0.03529412, 0.03137255],\n [0.03921569, 0.03137255],\n [0.04313726, 0.04313726],\n ]\n per_channel_affine_qint8_zp = [\n [-128, -43],\n [-15, -128],\n [-26, -128],\n [-35, -58],\n ]\n per_channel_affine_float_qparams_ref_scales = [\n [0.0196, 0.0471],\n 
[0.0353, 0.0196],\n [0.0392, 0.0235],\n [0.0431, 0.0431],\n ]\n per_channel_affine_quint8_zp = [[0, 85], [113, 0], [102, 0], [93, 70]]\n\n self.assertEqual(myobs.min_vals, ref_min_vals[ch_axis])\n self.assertEqual(myobs.max_vals, ref_max_vals[ch_axis])\n if qscheme == torch.per_channel_symmetric:\n ref_scales = per_channel_symmetric_ref_scales[ch_axis]\n ref_zero_points = [0, 0] if qdtype is torch.qint8 else [128, 128]\n elif qscheme == torch.per_channel_affine_float_qparams:\n ref_scales = per_channel_affine_float_qparams_ref_scales[ch_axis]\n ref_zero_points = [-1 * ref_min_vals[ch_axis][i] / ref_scales[i] for i in range(len(ref_scales))]\n else:\n ref_scales = per_channel_affine_ref_scales[ch_axis]\n ref_zero_points = (\n per_channel_affine_qint8_zp[ch_axis]\n if qdtype is torch.qint8\n else per_channel_affine_quint8_zp[ch_axis]\n )\n\n if reduce_range:\n ref_scales = [s * 255 / 127 for s in ref_scales]\n ref_zero_points = [math.floor(z / 2) for z in ref_zero_points]\n self.assertTrue(torch.allclose(qparams[0], torch.tensor(ref_scales, dtype=qparams[0].dtype), atol=0.0001))\n if qscheme == torch.per_channel_affine_float_qparams:\n self.assertTrue(torch.allclose(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype), atol=1))\n else:\n self.assertTrue(torch.allclose(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype)))\n\n\n # Test for serializability\n state_dict = myobs.state_dict()\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n loaded_obs = PerChannelMinMaxObserver(reduce_range=reduce_range, ch_axis=ch_axis, dtype=qdtype, qscheme=qscheme)\n loaded_obs.load_state_dict(loaded_dict)\n loaded_qparams = loaded_obs.calculate_qparams()\n self.assertEqual(myobs.min_vals, loaded_obs.min_vals)\n self.assertEqual(myobs.max_vals, loaded_obs.max_vals)\n self.assertEqual(myobs.calculate_qparams(), 
loaded_obs.calculate_qparams())\n\n\n def test_observer_scriptable(self):\n obs_list = [MinMaxObserver(), MovingAverageMinMaxObserver()]\n for obs in obs_list:\n scripted = torch.jit.script(obs)\n\n x = torch.rand(3, 4)\n obs(x)\n scripted(x)\n self.assertEqual(obs.calculate_qparams(), scripted.calculate_qparams())\n\n buf = io.BytesIO()\n torch.jit.save(scripted, buf)\n buf.seek(0)\n loaded = torch.jit.load(buf)\n self.assertEqual(obs.calculate_qparams(), loaded.calculate_qparams())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @override_qengines\n def test_state_dict_respects_device_affinity(self):\n \"\"\"\n Tests that loading from a state dict loads buffers to the correct\n device.\n \"\"\"\n device_cpu = torch.device('cpu')\n device_cuda = torch.device('cuda:0')\n test_cases = itertools.product(\n [device_cpu, device_cuda],\n [device_cpu, device_cuda],\n [MinMaxObserver, MovingAverageMinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver,\n # TODO: enable this (separate PR)\n # HistogramObserver,\n PlaceholderObserver, RecordingObserver, NoopObserver,\n FakeQuantize])\n\n for device_source, device_target, obs_cls in test_cases:\n # calibrated source model\n model = obs_cls()\n model.to(device_source)\n model(torch.randn(4, 1, 4, 4, device=device_source))\n # target model\n model2 = obs_cls()\n model2.to(device_target)\n model2.load_state_dict(model.state_dict())\n # verify that buffers stayed on model2's device\n model_devices = {p.device for p in model2.parameters()} | \\\n {p.device for p in model2.buffers()}\n # some observers do not have any buffers, so lessEqual instead of\n # Equal\n self.assertLessEqual(len(model_devices), 1)\n if len(model_devices) == 1:\n model_device = next(iter(model_devices))\n self.assertEqual(model_device, device_target)\n\n def test_histogram_observer_consistent_buffer_shape(self):\n \"\"\"\n Ensures that the buffer 
shapes do not change from uninitialized to\n initialized states for HistogramObserver.\n \"\"\"\n obs = HistogramObserver()\n min_shape_before = obs.min_val.shape\n max_shape_before = obs.max_val.shape\n for _ in range(2):\n obs(torch.randn(4, 4, 4, 4))\n self.assertEqual(min_shape_before, obs.min_val.shape)\n self.assertEqual(max_shape_before, obs.max_val.shape)\n\n def test_histogram_observer_save_load_state_dict(self):\n \"\"\"\n Smoke test on saving/loading state_dict\n \"\"\"\n obs1 = HistogramObserver()\n obs1(torch.randn(4, 4, 4, 4))\n obs2 = HistogramObserver()\n obs2.load_state_dict(obs1.state_dict())\n self.assertEqual(obs2.min_val.shape, torch.Size([]))\n self.assertEqual(obs2.max_val.shape, torch.Size([]))\n\n\n def test_save_load_state_dict_script(self):\n \"\"\"\n Tests that we can save and load state_dict for observers that are scripted\n in a quantized model.\n \"\"\"\n obs_list = [MinMaxObserver, MovingAverageMinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver, HistogramObserver]\n\n for obs in obs_list:\n model = SingleLayerLinearModel().eval()\n qconfig = QConfig(activation=default_observer, weight=obs)\n qconfig_dict = {'' : qconfig}\n scripted = torch.jit.script(model)\n scripted = torch.quantization.prepare_jit(scripted, qconfig_dict)\n x = torch.rand(5, 5)\n scripted(x)\n obs_dict = torch.quantization.get_observer_state_dict(scripted)\n\n # Load stats\n scripted_2 = torch.jit.script(model)\n scripted_2 = torch.quantization.prepare_jit(scripted_2, qconfig_dict)\n torch.quantization.load_observer_state_dict(scripted_2, obs_dict)\n # Verify that state_dict matches exactly with original one.\n self.assertEqual(scripted.state_dict(), scripted_2.state_dict())\n\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_observer_qparams_respects_device_affinity(self):\n \"\"\"\n Ensure that the scale and zero_point returned by the observer\n 
are on the same device as the input tensor.\n \"\"\"\n observerList = [MinMaxObserver(),\n MovingAverageMinMaxObserver(),\n PerChannelMinMaxObserver(),\n MovingAveragePerChannelMinMaxObserver()]\n for obs in observerList:\n device = torch.device('cuda:1')\n x = torch.randn(1, 2, device=device)\n obs.to(device)\n result = obs(x)\n scale, zero_point = obs.calculate_qparams()\n\n self.assertEqual(x.device, scale.device)\n self.assertEqual(x.device, zero_point.device)\n\n def test_zero_numel(self):\n obs_list = [MinMaxObserver, MovingAverageMinMaxObserver,\n PerChannelMinMaxObserver,\n MovingAveragePerChannelMinMaxObserver, HistogramObserver,\n FakeQuantize, FixedQParamsFakeQuantize]\n for obs_cls in obs_list:\n if obs_cls is FixedQParamsFakeQuantize:\n obs = obs_cls(0.1, 0)\n else:\n obs = obs_cls()\n x = torch.tensor([])\n # verify no crash\n x = obs(x)\n\n\n# HistogramObserver that works like it does on master\nclass _ReferenceHistogramObserver(HistogramObserver):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @torch.jit.ignore\n def _non_linear_param_search(self):\n r\"\"\"Non-linear parameter search.\n\n An approximation for L2 error minimization for selecting min/max.\n By selecting new min/max, we filter out outliers in input distribution.\n This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in\n caffe2/quantization/server/norm_minimization.cc\n \"\"\"\n def _get_norm(delta_begin, delta_end, density, norm_type):\n r\"\"\"\n Compute the norm of the values uniformaly distributed between\n delta_begin and delta_end.\n\n norm = density * (integral_{begin, end} x^2)\n = density * (end^3 - begin^3) / 3\n \"\"\"\n assert norm_type == \"L2\", \"Only L2 norms are currently supported\"\n norm = 0.0\n if norm_type == \"L2\":\n norm = (\n delta_end * delta_end * delta_end\n - delta_begin * delta_begin * delta_begin\n ) / 3\n return density * norm\n\n def _compute_quantization_error(next_start_bin, 
next_end_bin, norm_type):\n r\"\"\"\n Compute the quantization error if we use start_bin to end_bin as the\n min and max to do the quantization.\n \"\"\"\n bin_width = (self.max_val.item() - self.min_val.item()) / self.bins\n\n norm = 0.0\n dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins\n if dst_bin_width == 0.0:\n return 0.0\n for src_bin in range(self.bins):\n # distances from the beginning of first dst_bin to the beginning and\n # end of src_bin\n src_bin_begin = (src_bin - next_start_bin) * bin_width\n src_bin_end = src_bin_begin + bin_width\n\n # which dst_bins the beginning and end of src_bin belong to?\n dst_bin_of_begin = min(\n self.dst_nbins - 1, max(0.0, math.floor(src_bin_begin / dst_bin_width))\n )\n dst_bin_of_end = min(\n self.dst_nbins - 1, max(0.0, math.floor(src_bin_end / dst_bin_width))\n )\n dst_bin_of_begin_center = (\n dst_bin_of_begin * dst_bin_width + dst_bin_width / 2\n )\n\n density = self.histogram[src_bin] / bin_width\n if dst_bin_of_begin == dst_bin_of_end:\n # if src_bin is entirely within 1 dst_bin\n delta_begin = src_bin_begin - dst_bin_of_begin_center\n delta_end = src_bin_end - dst_bin_of_begin_center\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n else:\n delta_begin = src_bin_begin - dst_bin_of_begin_center\n delta_end = dst_bin_width / 2\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n\n norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(\n -dst_bin_width / 2, dst_bin_width / 2, density, norm_type\n )\n\n dst_bin_of_end_center = (\n dst_bin_of_end * dst_bin_width + dst_bin_width / 2\n )\n\n delta_begin = -dst_bin_width / 2\n delta_end = src_bin_end - dst_bin_of_end_center\n norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)\n return norm\n\n assert self.histogram.size()[0] == self.bins, \"bins mistmatch\"\n bin_width = (self.max_val - self.min_val) / self.bins\n\n # cumulative sum\n total = sum(self.histogram)\n cSum 
= torch.cumsum(self.histogram, dim=0)\n\n stepsize = 1e-5 # granularity\n alpha = 0.0 # lower bound\n beta = 1.0 # upper bound\n start_bin = 0\n end_bin = self.bins - 1\n norm_min = float(\"inf\")\n\n while alpha < beta:\n # Find the next step\n next_alpha = alpha + stepsize\n next_beta = beta - stepsize\n\n # find the left and right bins between the quantile bounds\n l = start_bin\n r = end_bin\n while l < end_bin and cSum[l] < next_alpha * total:\n l = l + 1\n while r > start_bin and cSum[r] > next_beta * total:\n r = r - 1\n\n # decide the next move\n next_start_bin = start_bin\n next_end_bin = end_bin\n if (l - start_bin) > (end_bin - r):\n # move the start bin\n next_start_bin = l\n alpha = next_alpha\n else:\n # move the end bin\n next_end_bin = r\n beta = next_beta\n\n if next_start_bin == start_bin and next_end_bin == end_bin:\n continue\n\n # calculate the quantization error using next_start_bin and next_end_bin\n norm = _compute_quantization_error(next_start_bin, next_end_bin, \"L2\")\n\n if norm > norm_min:\n break\n norm_min = norm\n start_bin = next_start_bin\n end_bin = next_end_bin\n\n new_min = self.min_val + bin_width * start_bin\n new_max = self.min_val + bin_width * (end_bin + 1)\n return new_min, new_max\n\nclass TestRecordHistogramObserver(QuantizationTestCase):\n # TODO: move this to quantize.py\n def test_record_observer(self):\n for qengine in supported_qengines:\n with override_quantized_engine(qengine):\n model = AnnotatedSingleLayerLinearModel()\n model.qconfig = default_debug_qconfig\n model = prepare(model)\n # run the evaluation and dump all tensors\n test_only_eval_fn(model, self.calib_data)\n test_only_eval_fn(model, self.calib_data)\n observer_dict = {}\n get_observer_dict(model, observer_dict)\n\n self.assertTrue('fc1.module.activation_post_process' in observer_dict.keys(),\n 'observer is not recorded in the dict')\n self.assertEqual(len(observer_dict['fc1.module.activation_post_process'].get_tensor_value()),\n 2 * 
len(self.calib_data))\n self.assertEqual(observer_dict['fc1.module.activation_post_process'].get_tensor_value()[0],\n model(self.calib_data[0][0]))\n\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)))\n def test_observer_scriptable(self, qdtype, qscheme):\n obs = RecordingObserver(dtype=qdtype, qscheme=qscheme)\n scripted = torch.jit.script(obs)\n\n x = torch.rand(3, 4)\n obs(x)\n scripted(x)\n self.assertTrue(torch.equal(obs.get_tensor_value()[0], scripted.get_tensor_value()[0]))\n buf = io.BytesIO()\n torch.jit.save(scripted, buf)\n buf.seek(0)\n loaded = torch.jit.load(buf)\n self.assertTrue(torch.equal(obs.get_tensor_value()[0], loaded.get_tensor_value()[0]))\n\nclass TestHistogramObserver(QuantizationTestCase):\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from(\n (torch.per_tensor_affine, torch.per_tensor_symmetric))\n )\n def test_observer_scriptable(self, qdtype, qscheme):\n ob_list = [\n HistogramObserver(dtype=qdtype, qscheme=qscheme),\n default_histogram_observer()\n ]\n for obs in ob_list:\n scripted = torch.jit.script(obs)\n\n x = torch.rand(3, 4)\n obs(x)\n scripted(x)\n self.assertTrue(torch.equal(obs.histogram, scripted.histogram))\n buf = io.BytesIO()\n torch.jit.save(scripted, buf)\n buf.seek(0)\n loaded = torch.jit.load(buf)\n self.assertTrue(torch.equal(obs.histogram, scripted.histogram))\n\n @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),\n qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),\n reduce_range=st.booleans())\n @settings(max_examples=10)\n def test_histogram_observer(self, qdtype, qscheme, reduce_range):\n myobs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)\n # Calculate qparams should work for empty observers\n qparams = myobs.calculate_qparams()\n x = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)\n y = 
torch.tensor([5.0, 6.0, 7.0, 8.0])\n out_x = myobs(x)\n self.assertTrue(out_x.requires_grad)\n myobs(y)\n self.assertEqual(myobs.min_val, 2.0)\n self.assertEqual(myobs.max_val, 8.0)\n self.assertEqual(myobs.histogram, [2., 3., 3.])\n\n qparams = myobs.calculate_qparams()\n\n if reduce_range:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.0470588 * 255 / 127\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0235294 * 255 / 127\n ref_zero_point = -64 if qdtype is torch.qint8 else 0\n else:\n if qscheme == torch.per_tensor_symmetric:\n ref_scale = 0.0470588\n ref_zero_point = 0 if qdtype is torch.qint8 else 128\n else:\n ref_scale = 0.0235294\n ref_zero_point = -128 if qdtype is torch.qint8 else 0\n\n self.assertEqual(qparams[1].item(), ref_zero_point)\n self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)\n # Test for serializability\n state_dict = myobs.state_dict()\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n loaded_obs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)\n loaded_obs.load_state_dict(loaded_dict)\n loaded_qparams = loaded_obs.calculate_qparams()\n self.assertEqual(myobs.min_val, loaded_obs.min_val)\n self.assertEqual(myobs.max_val, loaded_obs.max_val)\n self.assertEqual(myobs.histogram, loaded_obs.histogram)\n self.assertEqual(myobs.bins, loaded_obs.bins)\n self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())\n\n def test_histogram_observer_one_sided(self):\n myobs = HistogramObserver(bins=8, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)\n x = torch.tensor([0.0, 0.3, 1.2, 1.7])\n y = torch.tensor([0.1, 1.3, 2.0, 2.7])\n myobs(x)\n myobs(y)\n self.assertEqual(myobs.min_val, 0)\n qparams = myobs.calculate_qparams()\n self.assertEqual(qparams[1].item(), 0)\n\n def 
test_histogram_observer_same_inputs(self):\n myobs = HistogramObserver(bins=3, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)\n w = torch.ones(4, requires_grad=True)\n x = torch.zeros(4, requires_grad=True)\n y = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)\n z = torch.tensor([5.0, 6.0, 7.0, 8.0])\n myobs(w)\n myobs(x)\n myobs(x)\n myobs(y)\n myobs(z)\n qparams = myobs.calculate_qparams()\n self.assertEqual(myobs.min_val, 2.0)\n self.assertEqual(myobs.max_val, 8.0)\n self.assertEqual(myobs.histogram, [2., 3., 3.])\n\n @given(N=st.sampled_from([10, 1000]),\n bins=st.sampled_from([256, 512, 1024, 2048]),\n dtype=st.sampled_from([torch.qint8, torch.quint8]),\n qscheme=st.sampled_from([torch.per_tensor_affine, torch.per_tensor_symmetric]),\n reduce_range=st.booleans())\n def test_histogram_observer_against_reference(self, N, bins, dtype, qscheme, reduce_range):\n\n ref_obs = _ReferenceHistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)\n my_obs = HistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)\n\n for _ in range(10):\n X = torch.randn(N)\n my_obs(X)\n ref_obs(X)\n\n ref_qparams = ref_obs.calculate_qparams()\n my_qparams = my_obs.calculate_qparams()\n\n self.assertEqual(ref_qparams, my_qparams)\n\n\nclass TestFakeQuantize(TestCase):\n @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),\n X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),\n qparams=hu.qparams(dtypes=torch.qint8)))\n def test_fq_module_per_channel(self, device, X):\n np.random.seed(NP_RANDOM_SEED)\n X, (scale, zero_point, axis, torch_type) = X\n quant_min = torch.iinfo(torch_type).min\n quant_max = torch.iinfo(torch_type).max\n\n X = to_tensor(X, device)\n X.requires_grad_()\n fq_module = FakeQuantize(default_per_channel_weight_observer, quant_min, quant_max, ch_axis=axis).to(device)\n Y_prime = fq_module(X)\n assert fq_module.scale is not None\n assert 
fq_module.zero_point is not None\n Y = _fake_quantize_per_channel_affine_reference(X, fq_module.scale,\n fq_module.zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n # Test backward\n dout = torch.rand_like(X, dtype=torch.float, device=device)\n Y_prime.backward(dout)\n dX = _fake_quantize_per_channel_affine_grad_reference(dout, X, fq_module.scale,\n fq_module.zero_point, axis, quant_min, quant_max)\n np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)\n\n def test_fq_serializable_per_channel(self):\n observer = default_per_channel_weight_observer\n quant_min = -128\n quant_max = 127\n fq_module = FakeQuantize(observer, quant_min, quant_max)\n X = torch.tensor([[-5, -3.5, -2, 0, 3, 5, 7], [1, 3, 2, 5, 6.5, 8, 10]], dtype=torch.float32)\n y_ref = fq_module(X)\n state_dict = fq_module.state_dict()\n self.assertEqual(state_dict['scale'], [0.054902, 0.078431])\n self.assertEqual(state_dict['zero_point'], [0, 0])\n b = io.BytesIO()\n torch.save(state_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n for key in state_dict:\n self.assertEqual(state_dict[key], loaded_dict[key])\n\ndef _get_buffer_ids(module):\n \"\"\"\n Object addresses stay constant if and only if all modifications are in-place\n \"\"\"\n return [id(v) for k, v in module._buffers.items()]\n\nclass TestDistributed(QuantizationTestCase):\n\n def test_observers_preserve_buffers(self):\n \"\"\"\n Tests that observers only modify buffers in place. 
Note: this is important\n because nn.DataParallel depends on this assumption to work correctly.\n However, DataParallel does not expose IDs of the replicas, so we test it\n without DataParallel in order to easily access the object IDs.\n \"\"\"\n observer_types = [\n torch.quantization.MinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.MovingAverageMinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.PerChannelMinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.MovingAveragePerChannelMinMaxObserver.with_args(dtype=torch.qint8),\n torch.quantization.HistogramObserver.with_args(dtype=torch.qint8),\n torch.quantization.RecordingObserver.with_args(dtype=torch.qint8),\n torch.quantization.PlaceholderObserver.with_args(dtype=torch.float16),\n ]\n\n for observer_type in observer_types:\n observer = observer_type()\n buffer_ids_before = _get_buffer_ids(observer)\n for _i in range(5):\n inputs = torch.rand((4, 4, 4))\n observer(inputs)\n buffer_ids_after = _get_buffer_ids(observer)\n self.assertEqual(\n buffer_ids_before,\n buffer_ids_after,\n msg=\"{}: Buffers must be modified in place\".format(str(observer)))\n\n def test_fake_quant_preserves_buffers(self):\n \"\"\"\n Tests that fake quant only modifies buffers in place. 
Note: this is important\n because nn.DataParallel depends on this assumption to work correctly.\n However, DataParallel does not expose IDs of the replicas, so we test it\n without DataParallel in order to easily access the object IDs.\n \"\"\"\n model = torch.quantization.FakeQuantize()\n buffer_ids_before = _get_buffer_ids(model)\n for _i in range(5):\n inputs = torch.rand((4, 4, 4))\n model(inputs)\n model.apply(torch.quantization.enable_fake_quant)\n model.apply(torch.quantization.disable_fake_quant)\n model.apply(torch.quantization.enable_observer)\n model.apply(torch.quantization.disable_observer)\n buffer_ids_after = _get_buffer_ids(model)\n self.assertEqual(\n buffer_ids_before,\n buffer_ids_after,\n msg=\"FakeQuant: Buffers must be modified in place\")\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_qat_data_parallel(self):\n \"\"\"\n Tests that doing QAT in nn.DataParallel does not crash.\n \"\"\"\n if 'fbgemm' not in torch.backends.quantized.supported_engines:\n return\n with override_quantized_engine('fbgemm'):\n device = torch.device('cuda')\n\n model = nn.Sequential(\n torch.quantization.QuantStub(),\n nn.Conv2d(3, 1, 1, bias=False),\n nn.BatchNorm2d(1),\n nn.ReLU(),\n nn.Conv2d(1, 2, 3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(2),\n nn.AvgPool2d(14),\n nn.Sigmoid(),\n torch.quantization.DeQuantStub(),\n )\n\n torch.quantization.fuse_modules(model, [['1', '2', '3'], ['4', '5']], inplace=True)\n\n model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n torch.quantization.prepare_qat(model, inplace=True)\n model = nn.DataParallel(model, device_ids=[0, 1])\n model.to(device)\n model.train()\n\n for epoch in range(3):\n inputs = torch.rand(2, 3, 28, 28).to(device)\n model(inputs)\n if epoch >= 1:\n model.apply(torch.quantization.disable_observer)\n if epoch >= 2:\n model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)\n quant_model = 
copy.deepcopy(model.module)\n quant_model = torch.quantization.convert(quant_model.eval().cpu(), inplace=False)\n with torch.no_grad():\n out = quant_model(torch.rand(1, 3, 28, 28))\n\n def test_qat_convbn_fused_syncbn_replacement(self):\n \"\"\"\n Tests that SyncBatchNorm replacement works for fused ConvBN.\n \"\"\"\n if 'fbgemm' not in torch.backends.quantized.supported_engines:\n return\n with override_quantized_engine('fbgemm'):\n # create conv-bn\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.conv = nn.Conv2d(4, 1, 3, padding=1)\n self.bn = nn.BatchNorm2d(1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n model = Model()\n # fuse it\n fused_model = torch.quantization.fuse_modules(\n model,\n [['conv', 'bn']],\n )\n # convert to QAT\n fused_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n torch.quantization.prepare_qat(fused_model, inplace=True)\n # replace with DDP\n fused_model = nn.SyncBatchNorm.convert_sync_batchnorm(fused_model)\n self.assertTrue(\n isinstance(fused_model.conv.bn, nn.SyncBatchNorm),\n \"Expected BN to be converted to SyncBN\")\n\n def test_syncbn_preserves_qconfig(self):\n \"\"\"\n Makes sure that if a BatchNorm is not fused and a qconfig exists,\n convering the module to SyncBatchNorm preserves the qconfig.\n \"\"\"\n m = nn.Sequential(\n nn.Conv2d(1, 1, 1),\n nn.BatchNorm2d(1),\n )\n m[1].qconfig = torch.quantization.default_qconfig\n m = torch.nn.SyncBatchNorm.convert_sync_batchnorm(m)\n self.assertTrue(\n hasattr(m[1], \"qconfig\"),\n \"missing qconfig after SyncBatchNorm conversion\")\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @override_qengines\n def test_device_affinity(self):\n \"\"\"\n Tests that converting a model to QAT respects device affinity\n \"\"\"\n class Model(nn.Module):\n\n def __init__(self):\n super(Model, self).__init__()\n self.conv = nn.Conv2d(1, 
1, 1)\n self.bn = nn.BatchNorm2d(1)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n model = Model()\n model.qconfig = torch.quantization.get_default_qat_qconfig(torch.backends.quantized.engine)\n device = torch.device('cuda:0')\n model.to(device)\n torch.quantization.prepare_qat(model, inplace=True)\n model_devices = {p.device for p in model.parameters()} | \\\n {p.device for p in model.buffers()}\n self.assertEqual(len(model_devices), 1)\n model_device = next(iter(model_devices))\n self.assertEqual(model_device, device)\n\n # ensure that running an input on CUDA works without any needed changes\n input = torch.randn(4, 1, 4, 4, device=device)\n model(input)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_quantization.py TESTNAME\\n\\n\"\n \"instead.\")\n",
"#!/usr/bin/env python3\nimport json\nimport os\nimport shutil\nimport signal\nimport tempfile\nimport unittest\nfrom unittest import mock\n\nfrom torch.distributed.elastic.multiprocessing.errors import (\n ChildFailedError,\n ProcessFailure,\n record,\n)\nfrom torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error\nfrom torch.testing._internal.common_utils import TEST_WITH_TSAN\n\n\nclass SentinelError(Exception):\n # exists so that we can validate that\n # the correct error is raised and propagated\n pass\n\n\n@record\ndef raise_exception_fn():\n raise SentinelError(\"foobar\")\n\n\n@record\ndef good_fn():\n print(\"hello world\")\n\n\n@record\ndef raise_child_failure_error_fn(name, child_error_file=\"\"):\n if child_error_file:\n _write_error(SentinelError(\"foobar\"), child_error_file)\n pf = ProcessFailure(local_rank=0, pid=997, exitcode=1, error_file=child_error_file)\n raise ChildFailedError(name, {0: pf})\n\n\ndef read_resource_file(resource_file: str) -> str:\n with open(os.path.join(os.path.dirname(__file__), resource_file), \"r\") as fp:\n return \"\".join(fp.readlines())\n\n\[email protected](TEST_WITH_TSAN, \"test incompatible with tsan\")\nclass ApiTest(unittest.TestCase):\n def setUp(self):\n self.test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)\n self.test_error_file = os.path.join(self.test_dir, \"error.json\")\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n\n def test_failure_incorrect_reply_file(self):\n content = {\"unknown_key\": \"unknown_value\"}\n with open(self.test_error_file, \"w\") as fp:\n json.dump(content, fp)\n with self.assertRaises(Exception):\n ProcessFailure(\n local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file\n )\n\n def failure_with_error_file(self, exception):\n _write_error(exception, self.test_error_file)\n return ProcessFailure(\n local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file\n )\n\n def failure_without_error_file(self, exitcode):\n return 
ProcessFailure(\n local_rank=0, pid=997, exitcode=exitcode, error_file=\"ignored.json\"\n )\n\n def test_process_failure_new_format(self):\n error_data = {\"message\": \"test error message\", \"timestamp\": 10}\n with open(self.test_error_file, \"w\") as fp:\n json.dump(error_data, fp)\n pf = ProcessFailure(\n local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file\n )\n self.assertEqual(\"test error message\", pf.message)\n self.assertEqual(10, pf.timestamp)\n\n def test_process_mast_error_format(self):\n error_data = {\"message\": \"test error message\", \"timestamp\": \"10\"}\n with open(self.test_error_file, \"w\") as fp:\n json.dump(error_data, fp)\n pf = ProcessFailure(\n local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file\n )\n self.assertEqual(\"test error message\", pf.message)\n self.assertEqual(10, pf.timestamp)\n\n def test_process_failure(self):\n pf = self.failure_with_error_file(exception=SentinelError(\"foobar\"))\n self.assertEqual(0, pf.local_rank)\n self.assertEqual(997, pf.pid)\n self.assertEqual(1, pf.exitcode)\n self.assertEqual(self.test_error_file, pf.error_file)\n self.assertEqual(\n pf.error_file_data[\"message\"][\"extraInfo\"][\"timestamp\"], str(pf.timestamp)\n )\n self.assertTrue(pf.message) # check not None and not \"\" (empty string)\n self.assertEqual(\"<N/A>\", pf.signal_name())\n\n def test_process_failure_signal(self):\n pf = self.failure_without_error_file(exitcode=-signal.SIGSEGV)\n self.assertEqual(\"SIGSEGV\", pf.signal_name())\n self.assertEqual(\n f\"Signal {signal.SIGSEGV} (SIGSEGV) received by PID {pf.pid}\", pf.message\n )\n\n def test_process_failure_no_error_file(self):\n pf = self.failure_without_error_file(exitcode=138)\n self.assertEqual(\"<N/A>\", pf.signal_name())\n self.assertEqual(\"<N/A>\", pf.error_file)\n self.assertEqual(\"Process failed with exitcode 138\", pf.message)\n\n def test_child_failed_error(self):\n pf0 = self.failure_with_error_file(exception=SentinelError(\"rank 0\"))\n 
pf1 = self.failure_with_error_file(exception=SentinelError(\"rank 1\"))\n pf2 = self.failure_without_error_file(exitcode=138)\n ex = ChildFailedError(\"trainer.par\", {0: pf0, 1: pf1, 2: pf2})\n self.assertEqual(pf0, ex.get_first_failure()[1])\n # print is intentional and should prints something like this:\n \"\"\"\n *********************************************\n trainer.par FAILED\n =============================================\n Root Cause:\n [0]:\n time: 2020-11-25_21:22:31\n rank: 0 (local_rank: 0)\n exitcode: 1 (pid: 997)\n error_file: /tmp/ApiTesttbb37ier/error.json\n msg: \"SentinelError: rank 0\"\n =============================================\n Other Failures:\n [1]:\n time: 2020-11-25_21:22:31\n rank: 1 (local_rank: 0)\n exitcode: 1 (pid: 997)\n error_file: /tmp/ApiTesttbb37ier/error.json\n msg: \"SentinelError: rank 1\"\n [2]:\n time: 2020-11-25_21:22:31\n rank: 2 (local_rank: 0)\n exitcode: 138 (pid: 997)\n error_file: <N/A>\n msg: \"Process failed with exitcode 138\"\n *********************************************\n \"\"\"\n print(ex)\n\n def test_record(self):\n with mock.patch.dict(\n os.environ, {\"TORCHELASTIC_ERROR_FILE\": self.test_error_file}\n ):\n with self.assertRaises(SentinelError):\n raise_exception_fn()\n\n with open(self.test_error_file, \"r\") as fp:\n err = json.load(fp)\n self.assertIsNotNone(err[\"message\"][\"message\"])\n self.assertIsNotNone(err[\"message\"][\"extraInfo\"][\"py_callstack\"])\n self.assertIsNotNone(err[\"message\"][\"extraInfo\"][\"timestamp\"])\n\n def test_record_no_error_file(self):\n with mock.patch.dict(os.environ, {}):\n with self.assertRaises(SentinelError):\n raise_exception_fn()\n\n # no error file should have been generated\n self.assertFalse(os.path.isfile(self.test_error_file))\n\n def test_record_good_fn(self):\n with mock.patch.dict(\n os.environ, {\"TORCHELASTIC_ERROR_FILE\": self.test_error_file}\n ):\n good_fn()\n # function did not error; no error file should be produced\n 
self.assertFalse(os.path.isfile(self.test_error_file))\n\n def test_record_child_failure(self):\n trainer_log_dir = os.path.join(self.test_dir, \"trainer\", \"0\")\n os.makedirs(trainer_log_dir)\n trainer_error_file = os.path.join(trainer_log_dir, \"error.json\")\n\n with mock.patch.dict(\n os.environ, {\"TORCHELASTIC_ERROR_FILE\": self.test_error_file}\n ):\n with self.assertRaises(ChildFailedError) as cm:\n raise_child_failure_error_fn(\"trainer\", trainer_error_file)\n pf = cm.exception.get_first_failure()[1]\n # compare worker error file with reply file and overridden error code\n expect = json.load(open(pf.error_file, \"r\"))\n expect[\"message\"][\"errorCode\"] = pf.exitcode\n actual = json.load(open(self.test_error_file, \"r\"))\n self.assertTrue(\n json.dumps(expect, sort_keys=True),\n json.dumps(actual, sort_keys=True),\n )\n\n def test_record_child_failure_no_child_error_file(self):\n with mock.patch.dict(\n os.environ, {\"TORCHELASTIC_ERROR_FILE\": self.test_error_file}\n ):\n with self.assertRaises(ChildFailedError):\n raise_child_failure_error_fn(\"trainer\")\n\n # @record should only copy child error file when ChildFailedError\n # is raised - it should NOT record ChildFailedError itself\n # it SHOULD re-raise ChildFailedError for any upstream system\n # to handle it.\n self.assertFalse(os.path.isfile(self.test_error_file))\n"
] | [
[
"torch.quantization.MinMaxObserver",
"torch.jit.load",
"torch.quantization.get_observer_state_dict",
"torch.rand_like",
"torch.load",
"torch.zeros",
"torch.iinfo",
"torch.quantization.MovingAverageMinMaxObserver.with_args",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.quantization.DeQuantStub",
"torch.quantization.HistogramObserver.with_args",
"torch.no_grad",
"torch.cuda.is_available",
"torch.testing._internal.common_quantization.AnnotatedSingleLayerLinearModel",
"torch.device",
"torch.quantization.QConfig",
"torch.testing._internal.common_quantized.override_quantized_engine",
"torch.save",
"torch.jit.script",
"torch.Size",
"torch.ones",
"torch.quantization.prepare",
"torch.quantization.RecordingObserver.with_args",
"torch.quantization.prepare_qat",
"torch.quantization.prepare_jit",
"torch.randn",
"torch.quantization.HistogramObserver",
"torch.quantization.default_histogram_observer",
"torch.quantization.fuse_modules",
"torch.testing._internal.common_quantization.SingleLayerLinearModel",
"torch.equal",
"torch.tensor",
"torch.nn.Sigmoid",
"torch.rand",
"torch.quantization.QuantStub",
"torch.quantization.PerChannelMinMaxObserver.with_args",
"torch.testing._internal.common_quantization.test_only_eval_fn",
"torch.testing._internal.hypothesis_utils.qparams",
"torch.quantization.MovingAverageMinMaxObserver",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.testing._internal.hypothesis_utils.array_shapes",
"torch.testing._internal.common_quantized.to_tensor",
"torch.nn.AvgPool2d",
"torch.testing._internal.hypothesis_utils.assert_deadline_disabled",
"torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_grad_reference",
"torch.nn.BatchNorm2d",
"torch.quantization.MovingAveragePerChannelMinMaxObserver",
"torch.quantization.get_default_qat_qconfig",
"torch.jit.save",
"torch.quantization.FakeQuantize",
"torch.quantization.PlaceholderObserver.with_args",
"numpy.random.seed",
"torch.testing._internal.common_quantized._fake_quantize_per_channel_affine_reference",
"torch.quantization.get_observer_dict",
"torch.quantization.MovingAveragePerChannelMinMaxObserver.with_args",
"torch.quantization.PerChannelMinMaxObserver",
"torch.quantization.get_default_qconfig",
"torch.quantization.load_observer_state_dict",
"torch.nn.DataParallel",
"torch.cumsum",
"torch.quantization.RecordingObserver",
"torch.quantization.MinMaxObserver.with_args"
],
[
"torch.distributed.elastic.multiprocessing.errors.ProcessFailure",
"torch.distributed.elastic.multiprocessing.errors.error_handler._write_error",
"torch.distributed.elastic.multiprocessing.errors.ChildFailedError"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
karthiksekaran/mlprojects | [
"a16a4adb20e559b54a78f4e6fd26da520b5ea851"
] | [
"Decision Tree - Churn.py"
] | [
"#import modules\r\nimport pandas # for dataframes\r\nimport matplotlib.pyplot as plt # for plotting graphs\r\nimport seaborn as sns # for plotting graphs\r\n\r\ndata=pandas.read_csv('HR_comma_sep.csv')\r\n\r\n# Import LabelEncoder\r\nfrom sklearn import preprocessing\r\n#creating labelEncoder\r\nle = preprocessing.LabelEncoder()\r\n# Converting string labels into numbers.\r\ndata['salary']=le.fit_transform(data['salary'])\r\ndata['Departments ']=le.fit_transform(data['Departments '])\r\n\r\n#Spliting data into Feature and\r\nX=data[['satisfaction_level', 'last_evaluation', 'number_project',\r\n 'average_montly_hours', 'time_spend_company', 'Work_accident',\r\n 'promotion_last_5years', 'Departments ', 'salary']]\r\ny=data['left']\r\n# Import train_test_split function\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Split dataset into training set and test set\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) # 70% training and 30% test\r\n#Import Gradient Boosting Classifier model\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\n\r\n#Create Gradient Boosting Classifier\r\ngb = GradientBoostingClassifier()\r\n\r\n#Train the model using the training sets\r\ngb.fit(X_train, y_train)\r\n\r\n#Predict the response for test dataset\r\ny_pred = gb.predict(X_test)\r\n\r\n#Import scikit-learn metrics module for accuracy calculation\r\nfrom sklearn import metrics\r\n# Model Accuracy, how often is the classifier correct?\r\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\r\n# Model Precision\r\nprint(\"Precision:\",metrics.precision_score(y_test, y_pred))\r\n# Model Recall\r\nprint(\"Recall:\",metrics.recall_score(y_test, y_pred))"
] | [
[
"pandas.read_csv",
"sklearn.metrics.precision_score",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.recall_score",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
i-ivanova/Explorify | [
"54d14eedb121bb8cb972f86c807c7fe4ae174ac1"
] | [
"pipeline/jonatan_pipeline.py"
] | [
"import json\nimport pandas as pd\nimport numpy as np\nimport pprint\n\nfrom sklearn.preprocessing import StandardScaler\n\nfrom jonatan_settings import data_path, write_final_df, write_dr_results, audio_features\nfrom jonatan_scrape import get_track_data, get_artist_data, get_audio_feature_data\nfrom jonatan_dr import compute_dr_results\n\n\ndef read_streaming_history():\n with open(data_path + \"StreamingHistory0.json\", mode=\"r\", encoding=\"utf-8\") as f:\n data = json.loads(f.read())\n\n df = pd.DataFrame(data)\n df.endTime = pd.to_datetime(df.endTime)\n return df\n\ndef create_full_df(streamingHistory):\n track_data = get_track_data(streamingHistory)\n artist_data = get_artist_data(streamingHistory)\n track_features = get_audio_feature_data(track_data)\n\n # related_artists = pd.Series(scrape_related_artists(artist_data.artist_id), name=\"id\")\n \n merged = pd.merge(streamingHistory, artist_data, left_on='artistName', right_on='artist_name', how='inner')\n print(f\"\\tlost {streamingHistory.shape[0] - merged.shape[0]} entries when merging with artist_data\")\n print(streamingHistory[~streamingHistory.artistName.isin(merged.artistName)])\n \n merged = pd.merge(merged, track_data, left_on=[\"artistName\", \"trackName\"], right_index=True, how=\"left\")\n \n merged = pd.merge(merged, track_features, left_on=\"track_id\", right_index=True, how=\"left\")\n\n if write_final_df:\n keep_columns = list(streamingHistory.columns) \\\n + [\"artist_genres\", \"artist_id\", \"artist_popularity\", \"track_duration_ms\", \"track_id\", \"track_popularity\"]\n write_df = merged[keep_columns]\n json_str = write_df.to_json(orient=\"records\")\n with open(data_path + \"merged_history.json\", mode=\"w+\", encoding=\"utf-8\") as f:\n f.write(json_str)\n\n return merged\n\ndef get_dr_results(merged):\n for col in audio_features:\n merged[col] = merged[col].transform(float)\n merged[audio_features] = StandardScaler().fit_transform(merged[audio_features]) # Alternative: use 
MinMaxScaler to fit in specific range like [0, 1]\n\n artist_data = get_artist_data(merged)\n\n # drop entries where features are missing\n nan_entries = merged.danceability.isna()\n print(f\"\\tlost {nan_entries.sum()} entries when droppping entries missing features\")\n print(merged[nan_entries])\n merged = merged[~nan_entries]\n\n dr_results = compute_dr_results(merged, artist_data)\n\n if write_dr_results:\n dr_results = dr_results.replace([np.nan], [None])\n json_str = dr_results.to_json(orient=\"records\")\n with open(data_path + \"dr_results.json\", mode=\"w+\", encoding=\"utf-8\") as f:\n f.write(json_str)\n \n return dr_results\n\ndef main():\n print(\"Starting up pipeline...\")\n print(\"Reading streaming history...\")\n streamingHistory = read_streaming_history()\n print(\"Constructing complete dataset...\")\n merged_df = create_full_df(streamingHistory)\n print(\"Performing dimensionality reduction...\")\n dr_results = get_dr_results(merged_df)\n print(\"COMPLETE!\")\n\nif __name__ == '__main__':\n main()"
] | [
[
"sklearn.preprocessing.StandardScaler",
"pandas.merge",
"pandas.to_datetime",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
toandaominh1997/understanding_cloud_organization | [
"7da991ff3da557c18f4585c1b956ed799c104c7c"
] | [
"utils/metric.py"
] | [
"import torch \nimport numpy as np \n\n\nclass AverageMetric(object):\n def __init__(self, threshold=0.5):\n self.dice_scores = []\n self.threshold = threshold\n def update(self, outputs, labels):\n with torch.no_grad():\n probs = torch.sigmoid(outputs)\n dice_score = self.dice_metric(probability=probs, truth = labels)\n self.dice_scores.append(dice_score)\n def value(self):\n return np.mean(self.dice_scores)\n def reset(self):\n self.dice_scores = []\n def dice_metric(self, probability, truth):\n probability = torch.sigmoid(probability)\n batch_size = len(truth)\n with torch.no_grad():\n probability = probability.view(batch_size, -1)\n truth = truth.view(batch_size, -1)\n assert(probability.shape == truth.shape) \n p = (probability > self.threshold).float() \n t = (truth > 0.5).float() \n t_sum = t.sum(-1)\n p_sum = p.sum(-1)\n neg_index = torch.nonzero(t_sum == 0)\n pos_index = torch.nonzero(t_sum >= 1)\n dice_neg = (p_sum == 0).float()\n dice_pos = 2 * (p*t).sum(-1)/((p+t).sum(-1))\n dice_neg = dice_neg[neg_index]\n dice_pos = dice_pos[pos_index]\n dice = torch.cat([dice_pos, dice_neg]) \n dice = dice.mean().item()\n return dice\n \n"
] | [
[
"torch.sigmoid",
"torch.cat",
"torch.no_grad",
"numpy.mean",
"torch.nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
n3urovirtual/EyeTracking_Experiment_Children | [
"56c08a1202a686ac8f05d3c4e53766537895fe4f"
] | [
"Learning Scripts/Learn_PT.py"
] | [
"\"\"\"APPLY PYTHAGOREAN THEOREM IN LEARNING DATA + SMOOTH VELOCITIES\"\"\"\n\nimport os\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\nfrom helper import img_id, sub_id, TRIALS_PATH\n\n# Apply PT into smoothed learning data to find sample-to-sample distance:\n\nfor file in os.listdir(TRIALS_PATH):\n dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))\n x = dataset[\"BPOGX\"].diff().fillna(0).to_numpy()\n y = dataset[\"BPOGY\"].diff().fillna(0).to_numpy()\n sample_2_sample_distance = (x ** 2 + y ** 2) ** 0.5\n dataset[\"Distance\"] = np.nan_to_num(sample_2_sample_distance)\n dataset[\"Time\"] = dataset[\"TIME\"].diff().fillna(0).to_numpy()\n dataset[\"Velocity_px\"] = dataset[\"Distance\"] / dataset[\"Time\"]\n dataset[\"Velocity_deg\"] = dataset[\"Velocity_px\"] * 0.021\n dataset[\"Velocity_deg\"] = dataset[\"Velocity_deg\"].fillna(0)\n dataset = dataset[dataset[\"Velocity_deg\"] != 0]\n vel = dataset[\"Velocity_deg\"]\n sav_vel = savgol_filter(vel, 13, 2)\n dataset[\"Smoothed_Velocity_deg\"] = sav_vel.tolist()\n fix_or_sac = dataset[\"Smoothed_Velocity_deg\"] > 120\n dataset[\"Fix_or_Sac\"] = np.where(fix_or_sac, \"Sac\", \"Fix\")\n write_f = dataset[dataset[\"Smoothed_Velocity_deg\"] < 1000]\n write_f.to_csv(os.path.join(TRIALS_PATH, file), index=False)\n\n\n# Plot smoothed velocity vs. 
unsmoothed velocity\nfor k, i in itertools.product(sub_id, img_id):\n try:\n file = (\n \"Sub_\" + str(k) + \"_Image_\" + i.split(\".\")[0] + \"_Block_4.csv\"\n ) # Block 1,2,3,4\n dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 11))\n fig.suptitle(\n f'Subject:{str(k)} , Image:{i.split(\".\")[0]}, Block: 4', size=30\n ) # Block 1,2,3,4\n time = dataset[\"Time\"].cumsum()\n smoothed_velocity1 = dataset[\"Velocity_deg\"]\n smoothed_velocity2 = dataset[\"Smoothed_Velocity_deg\"]\n ax1.plot(time, smoothed_velocity1)\n ax1.set_ylim([0, 1000])\n ax1.set_title(\"Unsmoothed velocity\", size=15)\n ax2.plot(time, smoothed_velocity2)\n ax2.set_ylim([0, 1000])\n ax2.set_title(\"Smoothed velocity\", size=15)\n # plt.axhline(90, color='red')\n # plt.title(f'Subject:{str(k)} , Image:{i.split(\".\")[0]} , Block: 1')\n ax2.axhline(120, color=\"red\")\n fig.text(\n 0.5, \n 0.04, \n \"Time (in seconds)\", \n ha=\"center\", \n va=\"center\", \n fontsize=22\n )\n fig.text(\n 0.08,\n 0.5,\n \"Velocity (deg/sec.)\",\n ha=\"center\",\n va=\"center\",\n rotation=\"vertical\",\n fontsize=22,\n )\n plt.show()\n plt.close()\n except OSError:\n continue\n\n#Plot to fine-tune the velocity threshold\nfor k, i in itertools.product(sub_id, img_id):\n file = (\n \"Sub_\" + str(k) + \"_Image_\" + i.split(\".\")[0] + \"_Block_1.csv\"\n ) # Block 1,2,3,4\n dataset = pd.read_csv(os.path.join(TRIALS_PATH, file))\n time = dataset[\"Time\"].cumsum().fillna(0)\n velocity = dataset[\"Smoothed_Velocity_deg\"]\n plt.plot(time, velocity)\n plt.axhline(100, color=\"red\")\n plt.ylim(0, 1000)\n plt.title(f\"Subject:{str(k)} , Image:{str(i)}\")\n plt.xlabel(\"Time (sec)\")\n plt.ylabel(\"Velocity values\")\n plt.show()\n plt.close()\n"
] | [
[
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.ylim",
"numpy.nan_to_num",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.where",
"scipy.signal.savgol_filter",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
the-timoye/spark-examples | [
"d8784653a862e09e1c755ed2055d37a5516d2c3f"
] | [
"data_lakes.py"
] | [
"import pandas as pd\nimport pyspark.sql.functions as F\nfrom pyspark.sql import SparkSession\n\nimport sparknlp\nsparknlp.start()\nfrom sparknlp.pretrained import PretrainedPipeline\n\npd.set_option('max_colwidth', 800)\nspark = SparkSession.builder.config(\"spark.jars.packages\", \"com.johnsnowlabs.nlp:spark-nlp_2.12:3.0.3\").getOrCreate()\nspark\n\ndata_path = 'data/reddit-worldnews.json'\ndf = spark.read.json(data_path)\n\nprint('dataframe count = {}'.format(df.count()))\n\ntitle = 'data.title'\nauthor = 'data.author'\n\nprint('============== AUTHOR Vs TITLE ==============')\ndf_author_title = df.select(title, author)\nprint(df_author_title.limit(10).toPandas())\n\nprint('============== WORD COUNT ==============')\ndf_word_count = df.select(F.explode(F.split(title, '\\\\s+')).alias(\"word\")).groupBy(\"word\").count().sort(F.desc('count'))\nprint(df_word_count.limit(20).toPandas())\n\nprint('============== ANNOTATED DATAFRAME SCHEMA ==============')\nexplain_document_pipeline = PretrainedPipeline(\"explain_document_ml\")\ndf_annotated = explain_document_pipeline.annotate(df_author_title, \"title\")\ndf_annotated.printSchema()\n\nprint('============== QUERY MAPPEED TYPE SUB-FIELDS ==============')\ndf_check_data = df_annotated.select([\"text\", \"pos.metadata\", \"pos.result\"])\nprint(df_check_data.limit(10).toPandas())\n\n\n# extract POS from the annotated dataframe\ndf_pos = df_annotated.select(F.explode(\"pos\").alias(\"pos\"))\nprint(df_pos.toPandas())\ndf_pos.printSchema()\n\nprint('============== VIEW ONLY PROPER NOUNS ==============')\ndf_pos_nouns = df_pos.where(\"pos.result = 'NNP' OR pos.result = 'NNPS'\")\ndf_nouns = df_pos_nouns.selectExpr([\"pos.metadata['word'] AS word\", \"pos.result AS part_of_speech\"])\nprint(df_nouns.limit(10).toPandas())\n\nprint('============== VIEW MOST USED NOUNS==============')\ndf_common_nouns = df_nouns.groupBy(\"word\").count().sort(F.desc(\"count\"))\nprint(df_common_nouns.toPandas())"
] | [
[
"pandas.set_option"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jjaramillo34/pyimagesearchuniversity_course | [
"0a4a26c29a6f8122f6a03d3393ac01ebbc14a391",
"0a4a26c29a6f8122f6a03d3393ac01ebbc14a391"
] | [
"Aumented Reality 101/Fiducials and Markers/opencv-generate-aruco/opencv_generate_aruco.py",
"OpenCV 102/Basic Image Processing Operations/auto-canny/auto_canny_practice.py"
] | [
"# USAGE\n# python opencv_generate_aruco.py --id 24 --type DICT_5X5_100 --output tags/DICT_5X5_100_id24.png\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport cv2\nimport sys\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-o\", \"--output\", required=True,\n\thelp=\"path to output image containing ArUCo tag\")\nap.add_argument(\"-i\", \"--id\", type=int, required=True,\n\thelp=\"ID of ArUCo tag to generate\")\nap.add_argument(\"-t\", \"--type\", type=str,\n\tdefault=\"DICT_ARUCO_ORIGINAL\",\n\thelp=\"type of ArUCo tag to generate\")\nargs = vars(ap.parse_args())\n\n# define names of each possible ArUco tag OpenCV supports\nARUCO_DICT = {\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\n\t\"DICT_5X5_1000\": cv2.aruco.DICT_5X5_1000,\n\t\"DICT_6X6_50\": cv2.aruco.DICT_6X6_50,\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\n}\n\n# verify that the supplied ArUCo tag exists and is supported by\n# OpenCV\nif ARUCO_DICT.get(args[\"type\"], None) is None:\n\tprint(\"[INFO] ArUCo tag of '{}' is not supported\".format(\n\t\targs[\"type\"]))\n\tsys.exit(0)\n\n# load the ArUCo 
dictionary\narucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[args[\"type\"]])\n\n# allocate memory for the output ArUCo tag and then draw the ArUCo\n# tag on the output image\nprint(\"[INFO] generating ArUCo tag type '{}' with ID '{}'\".format(\n\targs[\"type\"], args[\"id\"]))\ntag = np.zeros((300, 300, 1), dtype=\"uint8\")\ncv2.aruco.drawMarker(arucoDict, args[\"id\"], 300, tag, 1)\n\n# write the generated ArUCo tag to disk and then display it to our\n# screen\ncv2.imwrite(args[\"output\"], tag)\ncv2.imshow(\"ArUCo Tag\", tag)\ncv2.waitKey(0)",
"# USAGE\n# python auto_canny_practice.py --images images\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport glob\nimport cv2\n\ndef auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 - sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n \n # return the edged image\n return edged\n\n# construct the argument parser and the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--images\", type=str, required=True,\n\thelp=\"path to the input image\")\nargs = vars(ap.parse_args())\n\n# loop over the images\nfor imagePath in glob.glob(args[\"images\"] + \"/*.jpg\"):\n # convert the image to grayscale and blur it slightly\n image = cv2.imread(imagePath)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (3, 3), 0)\n \n # apply Canny edge detection using a wide threshold, tight threshold, \n # and automatically determined threshold\n wide = cv2.Canny(blurred, 10, 200)\n tight = cv2.Canny(blurred, 225, 250)\n auto = auto_canny(blurred)\n \n # show the images\n cv2.imshow(\"Original\", image)\n cv2.imshow(\"Edges\", np.hstack([wide, tight, auto]))\n cv2.waitKey(0)"
] | [
[
"numpy.zeros"
],
[
"numpy.hstack",
"numpy.median"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lnmdlong/mmdetection | [
"87768a5d0a0188d46c50b575b417e9ec2fb5c06c"
] | [
"mmdet/models/roi_heads/mask_heads/fcn_mask_head.py"
] | [
"import os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer\nfrom mmcv.ops.carafe import CARAFEPack\nfrom mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import mask_target\nfrom mmdet.models.builder import HEADS, build_loss\n\nBYTES_PER_FLOAT = 4\n# TODO: This memory limit may be too much or too little. It would be better to\n# determine it based on available resources.\nGPU_MEM_LIMIT = 1024**3 # 1 GB memory limit\n\n\[email protected]_module()\nclass FCNMaskHead(BaseModule):\n\n def __init__(self,\n num_convs=4,\n roi_feat_size=14,\n in_channels=256,\n conv_kernel_size=3,\n conv_out_channels=256,\n num_classes=80,\n class_agnostic=False,\n upsample_cfg=dict(type='deconv', scale_factor=2),\n conv_cfg=None,\n norm_cfg=None,\n predictor_cfg=dict(type='Conv'),\n loss_mask=dict(\n type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n init_cfg=None):\n assert init_cfg is None, 'To prevent abnormal initialization ' \\\n 'behavior, init_cfg is not allowed to be set'\n super(FCNMaskHead, self).__init__(init_cfg)\n self.upsample_cfg = upsample_cfg.copy()\n if self.upsample_cfg['type'] not in [\n None, 'deconv', 'nearest', 'bilinear', 'carafe'\n ]:\n raise ValueError(\n f'Invalid upsample method {self.upsample_cfg[\"type\"]}, '\n 'accepted methods are \"deconv\", \"nearest\", \"bilinear\", '\n '\"carafe\"')\n self.num_convs = num_convs\n # WARN: roi_feat_size is reserved and not used\n self.roi_feat_size = _pair(roi_feat_size)\n self.in_channels = in_channels\n self.conv_kernel_size = conv_kernel_size\n self.conv_out_channels = conv_out_channels\n self.upsample_method = self.upsample_cfg.get('type')\n self.scale_factor = self.upsample_cfg.pop('scale_factor', None)\n self.num_classes = num_classes\n self.class_agnostic = class_agnostic\n self.conv_cfg = conv_cfg\n self.norm_cfg 
= norm_cfg\n self.predictor_cfg = predictor_cfg\n self.fp16_enabled = False\n self.loss_mask = build_loss(loss_mask)\n\n self.convs = ModuleList()\n for i in range(self.num_convs):\n in_channels = (\n self.in_channels if i == 0 else self.conv_out_channels)\n padding = (self.conv_kernel_size - 1) // 2\n self.convs.append(\n ConvModule(\n in_channels,\n self.conv_out_channels,\n self.conv_kernel_size,\n padding=padding,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n upsample_in_channels = (\n self.conv_out_channels if self.num_convs > 0 else in_channels)\n upsample_cfg_ = self.upsample_cfg.copy()\n if self.upsample_method is None:\n self.upsample = None\n elif self.upsample_method == 'deconv':\n upsample_cfg_.update(\n in_channels=upsample_in_channels,\n out_channels=self.conv_out_channels,\n kernel_size=self.scale_factor,\n stride=self.scale_factor)\n self.upsample = build_upsample_layer(upsample_cfg_)\n elif self.upsample_method == 'carafe':\n upsample_cfg_.update(\n channels=upsample_in_channels, scale_factor=self.scale_factor)\n self.upsample = build_upsample_layer(upsample_cfg_)\n else:\n # suppress warnings\n align_corners = (None\n if self.upsample_method == 'nearest' else False)\n upsample_cfg_.update(\n scale_factor=self.scale_factor,\n mode=self.upsample_method,\n align_corners=align_corners)\n self.upsample = build_upsample_layer(upsample_cfg_)\n\n out_channels = 1 if self.class_agnostic else self.num_classes\n logits_in_channel = (\n self.conv_out_channels\n if self.upsample_method == 'deconv' else upsample_in_channels)\n self.conv_logits = build_conv_layer(self.predictor_cfg,\n logits_in_channel, out_channels, 1)\n self.relu = nn.ReLU(inplace=True)\n self.debug_imgs = None\n\n def init_weights(self):\n super(FCNMaskHead, self).init_weights()\n for m in [self.upsample, self.conv_logits]:\n if m is None:\n continue\n elif isinstance(m, CARAFEPack):\n m.init_weights()\n else:\n nn.init.kaiming_normal_(\n m.weight, mode='fan_out', nonlinearity='relu')\n 
nn.init.constant_(m.bias, 0)\n\n @auto_fp16()\n def forward(self, x):\n for conv in self.convs:\n x = conv(x)\n if self.upsample is not None:\n x = self.upsample(x)\n if self.upsample_method == 'deconv':\n x = self.relu(x)\n mask_pred = self.conv_logits(x)\n return mask_pred\n\n def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n pos_assigned_gt_inds = [\n res.pos_assigned_gt_inds for res in sampling_results\n ]\n mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\n gt_masks, rcnn_train_cfg)\n return mask_targets\n\n @force_fp32(apply_to=('mask_pred', ))\n def loss(self, mask_pred, mask_targets, labels):\n \"\"\"\n Example:\n >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA\n >>> N = 7 # N = number of extracted ROIs\n >>> C, H, W = 11, 32, 32\n >>> # Create example instance of FCN Mask Head.\n >>> # There are lots of variations depending on the configuration\n >>> self = FCNMaskHead(num_classes=C, num_convs=1)\n >>> inputs = torch.rand(N, self.in_channels, H, W)\n >>> mask_pred = self.forward(inputs)\n >>> sf = self.scale_factor\n >>> labels = torch.randint(0, C, size=(N,))\n >>> # With the default properties the mask targets should indicate\n >>> # a (potentially soft) single-class label\n >>> mask_targets = torch.rand(N, H * sf, W * sf)\n >>> loss = self.loss(mask_pred, mask_targets, labels)\n >>> print('loss = {!r}'.format(loss))\n \"\"\"\n loss = dict()\n if mask_pred.size(0) == 0:\n loss_mask = mask_pred.sum()\n else:\n if self.class_agnostic:\n loss_mask = self.loss_mask(mask_pred, mask_targets,\n torch.zeros_like(labels))\n else:\n loss_mask = self.loss_mask(mask_pred, mask_targets, labels)\n loss['loss_mask'] = loss_mask\n return loss\n\n def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,\n ori_shape, scale_factor, rescale):\n \"\"\"Get segmentation masks from mask_pred and bboxes.\n\n Args:\n mask_pred (Tensor or 
ndarray): shape (n, #class, h, w).\n For single-scale testing, mask_pred is the direct output of\n model, whose type is Tensor, while for multi-scale testing,\n it will be converted to numpy array outside of this method.\n det_bboxes (Tensor): shape (n, 4/5)\n det_labels (Tensor): shape (n, )\n rcnn_test_cfg (dict): rcnn testing config\n ori_shape (Tuple): original image height and width, shape (2,)\n scale_factor(float | Tensor): If ``rescale is True``, box\n coordinates are divided by this scale factor to fit\n ``ori_shape``.\n rescale (bool): If True, the resulting masks will be rescaled to\n ``ori_shape``.\n\n Returns:\n list[list]: encoded masks. The c-th item in the outer list\n corresponds to the c-th class. Given the c-th outer list, the\n i-th item in that inner list is the mask for the i-th box with\n class label c.\n\n Example:\n >>> import mmcv\n >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA\n >>> N = 7 # N = number of extracted ROIs\n >>> C, H, W = 11, 32, 32\n >>> # Create example instance of FCN Mask Head.\n >>> self = FCNMaskHead(num_classes=C, num_convs=0)\n >>> inputs = torch.rand(N, self.in_channels, H, W)\n >>> mask_pred = self.forward(inputs)\n >>> # Each input is associated with some bounding box\n >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)\n >>> det_labels = torch.randint(0, C, size=(N,))\n >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })\n >>> ori_shape = (H * 4, W * 4)\n >>> scale_factor = torch.FloatTensor((1, 1))\n >>> rescale = False\n >>> # Encoded masks are a list for each category.\n >>> encoded_masks = self.get_seg_masks(\n >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,\n >>> scale_factor, rescale\n >>> )\n >>> assert len(encoded_masks) == C\n >>> assert sum(list(map(len, encoded_masks))) == N\n \"\"\"\n if not isinstance(mask_pred, torch.Tensor):\n mask_pred = det_bboxes.new_tensor(mask_pred)\n\n device = mask_pred.device\n cls_segms = [[] for _ in 
range(self.num_classes)\n ] # BG is not included in num_classes\n bboxes = det_bboxes[:, :4]\n labels = det_labels\n # No need to consider rescale and scale_factor while exporting to ONNX\n if torch.onnx.is_in_onnx_export():\n img_h, img_w = ori_shape[:2]\n else:\n if rescale:\n img_h, img_w = ori_shape[:2]\n else:\n if isinstance(scale_factor, float):\n img_h = np.round(ori_shape[0] * scale_factor).astype(\n np.int32)\n img_w = np.round(ori_shape[1] * scale_factor).astype(\n np.int32)\n else:\n w_scale, h_scale = scale_factor[0], scale_factor[1]\n img_h = np.round(ori_shape[0] * h_scale.item()).astype(\n np.int32)\n img_w = np.round(ori_shape[1] * w_scale.item()).astype(\n np.int32)\n scale_factor = 1.0\n\n if not isinstance(scale_factor, (float, torch.Tensor)):\n scale_factor = bboxes.new_tensor(scale_factor)\n bboxes = bboxes / scale_factor\n\n # support exporting to ONNX\n if torch.onnx.is_in_onnx_export():\n threshold = rcnn_test_cfg.mask_thr_binary\n if not self.class_agnostic:\n box_inds = torch.arange(mask_pred.shape[0])\n mask_pred = mask_pred[box_inds, labels][:, None]\n masks, _ = _do_paste_mask(\n mask_pred, bboxes, img_h, img_w, skip_empty=False)\n if threshold >= 0:\n masks = (masks >= threshold).to(dtype=torch.bool)\n else:\n # TensorRT backend does not have data type of uint8\n is_trt_backend = os.environ.get(\n 'ONNX_BACKEND') == 'MMCVTensorRT'\n target_dtype = torch.int32 if is_trt_backend else torch.uint8\n masks = (masks * 255).to(dtype=target_dtype)\n return masks\n\n N = len(mask_pred)\n # The actual implementation split the input into chunks,\n # and paste them chunk by chunk.\n if device.type == 'cpu':\n # CPU is most efficient when they are pasted one by one with\n # skip_empty=True, so that it performs minimal number of\n # operations.\n num_chunks = N\n else:\n # GPU benefits from parallelism for larger chunks,\n # but may have memory issue\n num_chunks = int(\n np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))\n assert 
(num_chunks <=\n N), 'Default GPU_MEM_LIMIT is too small; try increasing it'\n chunks = torch.chunk(torch.arange(N, device=device), num_chunks)\n\n threshold = rcnn_test_cfg.mask_thr_binary\n im_mask = torch.zeros(\n N,\n img_h,\n img_w,\n device=device,\n dtype=torch.bool if threshold >= 0 else torch.uint8)\n\n if not self.class_agnostic:\n mask_pred = mask_pred[range(N), labels][:, None]\n\n for inds in chunks:\n masks_chunk, spatial_inds = _do_paste_mask(\n mask_pred[inds],\n bboxes[inds],\n img_h,\n img_w,\n skip_empty=device.type == 'cpu')\n\n if threshold >= 0:\n masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)\n else:\n # for visualization and debugging\n masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)\n\n im_mask[(inds, ) + spatial_inds] = masks_chunk\n\n if torch.jit.is_tracing():\n return im_mask.detach().int()\n\n for i in range(N):\n cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())\n return cls_segms\n\n\ndef _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):\n \"\"\"Paste instance masks according to boxes.\n\n This implementation is modified from\n https://github.com/facebookresearch/detectron2/\n\n Args:\n masks (Tensor): N, 1, H, W\n boxes (Tensor): N, 4\n img_h (int): Height of the image to be pasted.\n img_w (int): Width of the image to be pasted.\n skip_empty (bool): Only paste masks within the region that\n tightly bound all boxes, and returns the results this region only.\n An important optimization for CPU.\n\n Returns:\n tuple: (Tensor, tuple). The first item is mask tensor, the second one\n is the slice object.\n If skip_empty == False, the whole image will be pasted. 
It will\n return a mask of shape (N, img_h, img_w) and an empty tuple.\n If skip_empty == True, only area around the mask will be pasted.\n A mask of shape (N, h', w') and its start and end coordinates\n in the original image will be returned.\n \"\"\"\n # On GPU, paste all masks together (up to chunk size)\n # by using the entire image to sample the masks\n # Compared to pasting them one by one,\n # this has more operations but is faster on COCO-scale dataset.\n device = masks.device\n if skip_empty:\n x0_int, y0_int = torch.clamp(\n boxes.min(dim=0).values.floor()[:2] - 1,\n min=0).to(dtype=torch.int32)\n x1_int = torch.clamp(\n boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)\n y1_int = torch.clamp(\n boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)\n else:\n x0_int, y0_int = 0, 0\n x1_int, y1_int = img_w, img_h\n x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1\n\n N = masks.shape[0]\n\n img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5\n img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5\n img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n # img_x, img_y have shapes (N, w), (N, h)\n # IsInf op is not supported with ONNX<=1.7.0\n if not torch.onnx.is_in_onnx_export():\n if torch.isinf(img_x).any():\n inds = torch.where(torch.isinf(img_x))\n img_x[inds] = 0\n if torch.isinf(img_y).any():\n inds = torch.where(torch.isinf(img_y))\n img_y[inds] = 0\n\n gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))\n gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))\n grid = torch.stack([gx, gy], dim=3)\n\n img_masks = F.grid_sample(\n masks.to(dtype=torch.float32), grid, align_corners=False)\n\n if skip_empty:\n return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))\n else:\n return img_masks[:, 0], ()\n"
] | [
[
"torch.isinf",
"torch.zeros",
"torch.split",
"torch.nn.init.constant_",
"torch.zeros_like",
"torch.arange",
"numpy.round",
"numpy.ceil",
"torch.jit.is_tracing",
"torch.nn.modules.utils._pair",
"torch.stack",
"torch.nn.ReLU",
"torch.onnx.is_in_onnx_export",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
symphonylyh/transformers | [
"03e5d5196ca76008b60da9bb6d604e6bdbcba0db",
"03e5d5196ca76008b60da9bb6d604e6bdbcba0db"
] | [
"tests/t5/test_modeling_tf_t5.py",
"src/transformers/models/deberta_v2/modeling_deberta_v2_original.py"
] | [
"# coding=utf-8\n# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom transformers import T5Config, is_tf_available\nfrom transformers.file_utils import cached_property\nfrom transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model\n\n\nclass TFT5ModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 13\n self.seq_length = 7\n self.is_training = True\n self.use_input_mask = True\n self.use_labels = True\n self.vocab_size = 99\n self.n_positions = 14\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.d_ff = 37\n self.relative_attention_num_buckets = 8\n self.dropout_rate = 0.1\n self.initializer_factor = 0.002\n self.eos_token_id = 1\n self.pad_token_id = 0\n self.scope = None\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n token_labels = None\n if self.use_labels:\n token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n config = T5Config(\n vocab_size=self.vocab_size,\n n_positions=self.n_positions,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n decoder_start_token_id=self.pad_token_id,\n )\n\n return (config, input_ids, input_mask, token_labels)\n\n def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels):\n model = TFT5Model(config=config)\n inputs = {\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n result = model(inputs)\n\n result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids)\n decoder_output = result.last_hidden_state\n decoder_past = result.past_key_values\n encoder_output = result.encoder_last_hidden_state\n self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])\n self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])\n # There should be `num_layers` key value embeddings stored in decoder_past[1]\n self.parent.assertEqual(len(decoder_past), config.num_layers)\n # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past[1] tuple\n self.parent.assertEqual(len(decoder_past[0]), 4)\n\n def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels):\n model = TFT5ForConditionalGeneration(config=config)\n inputs_dict = {\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n \"decoder_attention_mask\": 
input_mask,\n }\n\n result = model(inputs_dict)\n\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask):\n model = TFT5Model(config=config).get_decoder()\n\n input_ids = input_ids[:1, :]\n self.batch_size = 1\n\n # first forward pass\n outputs = model(input_ids, use_cache=True)\n\n outputs_use_cache_conf = model(input_ids)\n outputs_no_past = model(input_ids, use_cache=False)\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n\n output_from_no_past = model(next_input_ids)[0]\n output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0]\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]\n output_from_past_slice = output_from_past[:, 0, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_t5_decoder_model_attention_mask_past(\n self, config, input_ids, decoder_input_ids, attention_mask\n ):\n model = TFT5Model(config=config).get_decoder()\n\n # create attention mask\n half_seq_length = self.seq_length // 2\n attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)\n attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)\n attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)\n\n # first forward pass\n outputs = model(input_ids, attention_mask=attn_mask, 
use_cache=True)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)\n vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)\n condition = tf.transpose(\n tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))\n )\n input_ids = tf.where(condition, random_other_next_tokens, input_ids)\n\n # append to next input_ids and attn_mask\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n attn_mask = tf.concat(\n [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],\n axis=1,\n )\n\n # get two different outputs\n output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0]\n output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]\n output_from_past_slice = output_from_past[:, 0, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_t5_decoder_model_past_large_inputs(\n self, config, input_ids, decoder_input_ids, attention_mask\n ):\n model = TFT5Model(config=config).get_decoder()\n\n input_ids = input_ids[:1, :]\n attention_mask = attention_mask[:1, :]\n self.batch_size = 1\n\n # first forward pass\n outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_attn_mask = 
ids_tensor((self.batch_size, 3), 2)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)\n\n output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]\n output_from_past = model(\n next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values\n )[0]\n\n self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]\n output_from_past_slice = output_from_past[:, :, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_t5_xla_generate(self, config, input_ids, *args):\n config.eos_token_id = None\n config.max_length = 10\n config.do_sample = False\n config.num_beams = 1\n model = TFT5ForConditionalGeneration(config=config)\n\n # make sure there are no pad tokens in prompt\n input_ids = tf.where(input_ids != config.pad_token_id, input_ids, config.pad_token_id + 5)\n\n generated = model.generate(input_ids)\n\n generate_xla = tf.function(model.generate, jit_compile=True)\n generated_xla = generate_xla(input_ids)\n\n self.parent.assertListEqual(generated.numpy().tolist(), generated_xla.numpy().tolist())\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (config, input_ids, input_mask, token_labels) = config_and_inputs\n inputs_dict = {\n \"input_ids\": input_ids,\n \"decoder_input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n return config, inputs_dict\n\n\n@require_tf\nclass TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):\n\n is_encoder_decoder = True\n all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if 
is_tf_available() else ()\n all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else ()\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFT5ModelTester(self)\n self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_t5_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_model(*config_and_inputs)\n\n def test_t5_model_v1_1(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n config = config_and_inputs[0]\n config.tie_word_embeddings = False\n config.feed_forward_proj = \"gated-gelu\"\n self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:])\n\n def test_with_lm_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)\n\n def test_t5_decoder_model_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs)\n\n def test_t5_decoder_model_past_with_attn_mask(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs)\n\n def test_t5_decoder_model_past_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs)\n\n def test_t5_model_xla_generate(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_t5_xla_generate(*config_and_inputs)\n\n def test_model_common_attributes(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n assert 
isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)\n\n if model_class in self.all_generative_model_classes:\n x = model.get_output_embeddings()\n assert isinstance(x, tf.keras.layers.Layer)\n name = model.get_bias()\n assert name is None\n else:\n x = model.get_output_embeddings()\n assert x is None\n name = model.get_bias()\n assert name is None\n\n def test_saved_model_creation(self):\n # This test is too long (>30sec) and makes fail the CI\n pass\n\n @slow\n def test_model_from_pretrained(self):\n model = TFT5Model.from_pretrained(\"t5-small\")\n self.assertIsNotNone(model)\n\n def test_generate_with_headmasking(self):\n # TODO: Fix head-masking according to PyTorch T5 model\n pass\n\n @slow\n def test_resize_embeddings(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n original_vocab_size = model.get_input_embeddings().weight.shape[0]\n # the vocab size is defined in the model config\n self.assertEqual(original_vocab_size, model.config.vocab_size)\n\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n tokenizer.add_special_tokens({\"bos_token\": \"\", \"eos_token\": \"\"})\n model._resize_token_embeddings(len(tokenizer))\n # the vocab size is now resized to the length of the tokenizer, which is different from the original size\n self.assertEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))\n self.assertNotEqual(model.get_input_embeddings().weight.shape[0], original_vocab_size)\n\n\nclass TFT5EncoderOnlyModelTester:\n def __init__(\n self,\n parent,\n vocab_size=99,\n batch_size=13,\n encoder_seq_length=7,\n # For common tests\n use_attention_mask=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n d_ff=37,\n relative_attention_num_buckets=8,\n is_training=False,\n dropout_rate=0.1,\n initializer_factor=0.002,\n is_encoder_decoder=False,\n eos_token_id=1,\n pad_token_id=0,\n scope=None,\n ):\n\n self.parent = parent\n self.batch_size = batch_size\n self.encoder_seq_length = 
encoder_seq_length\n # For common tests\n self.seq_length = self.encoder_seq_length\n self.use_attention_mask = use_attention_mask\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.d_ff = d_ff\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.initializer_factor = initializer_factor\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.is_encoder_decoder = is_encoder_decoder\n self.scope = None\n self.is_training = is_training\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)\n\n attention_mask = None\n if self.use_attention_mask:\n attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)\n\n config = T5Config(\n vocab_size=self.vocab_size,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n is_encoder_decoder=self.is_encoder_decoder,\n )\n\n return (\n config,\n input_ids,\n attention_mask,\n )\n\n def create_and_check_model(\n self,\n config,\n input_ids,\n attention_mask,\n ):\n model = TFT5EncoderModel(config=config)\n result = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n )\n result = model(input_ids=input_ids)\n encoder_output = result.last_hidden_state\n\n self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n 
(\n config,\n input_ids,\n attention_mask,\n ) = config_and_inputs\n\n inputs_dict = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n }\n return config, inputs_dict\n\n\nclass TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase):\n is_encoder_decoder = False\n all_model_classes = (TFT5EncoderModel,) if is_tf_available() else ()\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFT5EncoderOnlyModelTester(self)\n self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n # is not able to be part of a pipeline\n def test_train_pipeline_custom_model(self):\n pass\n\n\n@require_tf\n@require_sentencepiece\n@require_tokenizers\nclass TFT5GenerationIntegrationTests(unittest.TestCase):\n @slow\n def test_greedy_xla_generate_simple(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n sentence = \"Translate English to German: Today is a beautiful day.\"\n input_ids = tokenizer(sentence, return_tensors=\"tf\", padding=True).input_ids\n\n xla_generate = tf.function(model.generate, jit_compile=True)\n\n output_ids = model.generate(input_ids)\n output_ids_xla = xla_generate(input_ids)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True)\n\n expected_output_string = [\"Heute ist ein schöner Tag.\"]\n\n self.assertListEqual(expected_output_string, output_strings)\n self.assertListEqual(expected_output_string, output_strings_xla)\n\n @slow\n def test_greedy_generate(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n 
sentences = [\"Yesterday, my name was\", \"Today is a beautiful day and\"]\n input_ids = tokenizer(sentences, return_tensors=\"tf\", padding=True).input_ids\n\n generation_kwargs = {\n \"bad_words_ids\": [tokenizer(\"my\").input_ids, tokenizer(\"ein schöner\").input_ids],\n \"no_repeat_ngram_size\": 3,\n \"do_sample\": False,\n \"repetition_penalty\": 2.2,\n }\n\n output_ids = model.generate(input_ids, **generation_kwargs)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n expected_output_string = [\"Yesterday, my name was\", \"Heute ist ein schöne Tag und\"]\n\n self.assertListEqual(expected_output_string, output_strings)\n\n @slow\n def test_sample_generate(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n sentences = [\"I really love my\", \"Translate English to German: the transformers are truly amazing\"]\n input_ids = tokenizer(sentences, return_tensors=\"tf\", padding=True).input_ids\n\n generation_kwargs = {\n \"do_sample\": True,\n \"bad_words_ids\": [tokenizer(\"my\").input_ids, tokenizer(\"ein schöner\").input_ids],\n \"no_repeat_ngram_size\": 3,\n \"repetition_penalty\": 2.2,\n \"temperature\": 0.8,\n \"top_k\": 500,\n \"top_p\": 0.9,\n }\n\n # forces the generation to happen on CPU, to avoid GPU-related quirks\n with tf.device(\":/CPU:0\"):\n tf.random.set_seed(42) # deterministic sampling sequence -> deterministic generation\n output_ids = model.generate(input_ids, **generation_kwargs)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n expected_output_string = [\"i love her I really love my heart\", \"die Transformatoren sind wirklich erstaunlich\"]\n\n self.assertListEqual(expected_output_string, output_strings)\n\n\n@require_tf\n@require_sentencepiece\n@require_tokenizers\nclass TFT5ModelIntegrationTests(unittest.TestCase):\n @cached_property\n def model(self):\n return 
TFT5ForConditionalGeneration.from_pretrained(\"t5-base\")\n\n @slow\n def test_small_integration_test(self):\n \"\"\"\n For comparision run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_t5_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"tf\").input_ids\n\n loss = model(input_ids, labels=labels).loss\n mtf_score = -tf.math.reduce_sum(loss).numpy()\n\n EXPECTED_SCORE = -19.0845\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_small_v1_1_integration_test(self):\n \"\"\"\n For comparision run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_t5_v1.1_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1.1_checkpoint, batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = TFT5ForConditionalGeneration.from_pretrained(\"google/t5-v1_1-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"google/t5-v1_1-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n labels = tokenizer(\"Hi I am\", 
return_tensors=\"tf\").input_ids\n\n loss = model(input_ids, labels=labels).loss\n mtf_score = -tf.math.reduce_sum(loss).numpy()\n\n EXPECTED_SCORE = -59.0293\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_small_byt5_integration_test(self):\n \"\"\"\n For comparision run:\n >>> import t5 # pip install t5==0.9.1\n\n >>> path_to_byt5_small_checkpoint = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None)\n >>> vocab = t5.data.ByteVocabulary()\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = TFT5ForConditionalGeneration.from_pretrained(\"google/byt5-small\")\n tokenizer = ByT5Tokenizer.from_pretrained(\"google/byt5-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"tf\").input_ids\n\n loss = model(input_ids, labels=labels).loss\n mtf_score = -tf.math.reduce_sum(loss).numpy()\n\n EXPECTED_SCORE = -60.7397\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_summarization(self):\n model = self.model\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n\n FRANCE_ARTICLE = 'Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that \"so far no videos were used in the crash investigation.\" He added, \"A person who has such a video needs to immediately give it to the investigators.\" Robin\\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. 
The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. \"One can hear cries of \\'My God\\' in several languages,\" Paris Match reported. \"Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing.\" \"It is a very disturbing scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France\\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were \"completely wrong\" and \"unwarranted.\" Cell phones have been collected at the site, he said, but that they \"hadn\\'t been exploited yet.\" Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical \"no.\" Reichelt told \"Erin Burnett: Outfront\" that he had watched the video and stood by the report, saying Bild and Paris Match are \"very confident\" that the clip is real. He noted that investigators only revealed they\\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. \"That is something we did not know before. ... Overall we can say many things of the investigation weren\\'t revealed by the investigation at the beginning,\" he said. 
What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a \"previous episode of severe depression,\" the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a \"swift and seamless clarification\" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\\' personal belongings will start Wednesday, Menichini said. 
Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and \"held all the licenses required.\" Earlier, a spokesman for the prosecutor\\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\\'s license. Kumpa emphasized there\\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was \"a big part of his life,\" the source said, it\\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\\'s more to the story, said Brian Russell, a forensic psychologist. 
\"Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\\'t going to keep doing their job and they\\'re upset about that and so they\\'re suicidal,\" he said. \"But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\\'s problems.\" Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa\n\n SHORTER_ARTICLE = '(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed \"in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014.\" Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\\'s ceremony, said it was a move toward greater justice. 
\"As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice,\" he said, according to an ICC news release. \"Indeed, today brings us closer to our shared goals of justice and peace.\" Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. \"As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly,\" she said. Rights group Human Rights Watch welcomed the development. \"Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\\'s treaty should speak out to welcome its membership,\" said Balkees Jarrah, international justice counsel for the group. \"What\\'s objectionable is the attempts to undermine international justice, not Palestine\\'s decision to join a treaty to which over 100 countries around the world are members.\" In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it \"strongly\" disagreed with the court\\'s decision. \"As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC,\" the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. \"We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,\" it said. 
But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as \"Palestine.\" While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would \"conduct its analysis in full independence and impartiality.\" The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'\n\n IRAN_ARTICLE = \"(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. 
Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. 
Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \\\"advise and consent.\\\" But the issue is not suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions.\"\n\n ARTICLE_SUBWAY = 'New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. 
A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared \"I do\" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her \"first and only\" marriage. Barrientos, now 39, is facing two criminal counts of \"offering a false instrument for filing in the first degree,\" referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\\'s Investigation Division. Seven of the men are from so-called \"red-flagged\" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. 
Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.'\n\n expected_summaries = [\n 'prosecutor: \"so far no videos were used in the crash investigation\" two magazines claim to have found a cell phone video of the final seconds . \"one can hear cries of \\'My God\\' in several languages,\" one magazine says .',\n \"the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a preliminary examination into the situation in the occupied Palestinian territory . as members of the court, Palestinians may be subject to counter-charges as well .\",\n \"the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller: the debate that has already begun since the announcement of the new framework will likely result in more heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and implement a rigorous inspection regime .\",\n 'prosecutors say the marriages were part of an immigration scam . 
if convicted, barrientos faces two criminal counts of \"offering a false instrument for filing in the first degree\" she has been married 10 times, with nine of her marriages occurring between 1999 and 2002 .',\n ]\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n summarization_config = task_specific_config.get(\"summarization\", {})\n model.config.update(summarization_config)\n\n dct = tok(\n [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]],\n max_length=512,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"tf\",\n )\n self.assertEqual(512, dct[\"input_ids\"].shape[1])\n\n hypotheses_batch = model.generate(\n input_ids=dct[\"input_ids\"],\n attention_mask=dct[\"attention_mask\"],\n num_beams=4,\n length_penalty=2.0,\n max_length=142,\n min_length=56,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n\n decoded = [\n tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch\n ]\n\n self.assertListEqual(\n expected_summaries,\n decoded,\n )\n\n @slow\n def test_translation_en_to_de(self):\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n model = self.model\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n translation_config = task_specific_config.get(\"translation_en_to_de\", {})\n self.model.config.update(translation_config)\n\n original_input = '\"Luigi often said to me that he never wanted the brothers to end up in court\", she wrote.'\n expected_translation = (\n '\"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen\", schrieb sie.'\n )\n\n input_ids = tok.encode(model.config.prefix + original_input, return_tensors=\"tf\")\n\n output = model.generate(\n input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=50,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], 
skip_special_tokens=True, clean_up_tokenization_spaces=False)\n\n self.assertEqual(translation, expected_translation)\n\n @slow\n def test_translation_en_to_fr(self):\n model = self.model\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n translation_config = task_specific_config.get(\"translation_en_to_fr\", {})\n model.config.update(translation_config)\n\n en_text = ' This image section from an infrared recording by the Spitzer telescope shows a \"family portrait\" of countless generations of stars: the oldest stars are seen as blue dots. '\n\n new_truncated_translation = (\n \"Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre \"\n \"un \"\n \"« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées \"\n \"sous forme \"\n \"de points bleus.\"\n )\n\n input_ids = tok(model.config.prefix + en_text, return_tensors=\"tf\").input_ids\n\n output = model.generate(\n input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=100,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n\n self.assertEqual(translation, new_truncated_translation)\n\n @slow\n def test_translation_en_to_ro(self):\n model = self.model\n tok = T5Tokenizer.from_pretrained(\"t5-base\")\n\n task_specific_config = getattr(model.config, \"task_specific_params\", {})\n translation_config = task_specific_config.get(\"translation_en_to_ro\", {})\n model.config.update(translation_config)\n\n original_input = \"Taco Bell said it plans to add 2,000 locations in the US by 2022.\"\n expected_translation = \"Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022.\"\n\n input_ids = tok.encode(model.config.prefix + original_input, return_tensors=\"tf\")\n\n output = model.generate(\n 
input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=50,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n\n self.assertEqual(translation, expected_translation)\n\n def test_finetune_keras_trainer(self):\n \"\"\"Ensure that the model can be fine-tuned via the keras API and\n that metrics work as expected.\n \"\"\"\n\n # This metric expects to be called with the logits output\n def _accuracy(y_true, y_pred):\n return tf.keras.metrics.sparse_categorical_crossentropy(y_true[:, 0], y_pred[:, 0])\n\n # measure the accuracy of the first token\n class FirstTokenAccuracy(tf.keras.metrics.MeanMetricWrapper):\n def __init__(self, name=\"accuracy\", **kwargs):\n super().__init__(_accuracy, name=name, **kwargs)\n\n model = self.model\n model.compile(\"adam\", metrics=FirstTokenAccuracy())\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n examples = [\n (\"sentiment: Everything is awesome!\", \"positive\"),\n (\"sentiment: Tensorflow datasets are hard to use\", \"negative\"),\n ]\n\n inputs = dict(tokenizer([x[0] for x in examples], padding=True, return_tensors=\"tf\"))\n inputs[\"labels\"] = tokenizer([x[1] for x in examples], return_tensors=\"tf\").input_ids\n\n model.fit(inputs)\n m = model.evaluate(inputs)\n self.assertEqual(len(m), 2)\n",
"# coding=utf-8\n# Copyright 2020 Microsoft and the Hugging Face Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch DeBERTa-v2 model.\"\"\"\n\nimport math\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport torch\nfrom torch import _softmax_backward_data, nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_outputs import (\n BaseModelOutput,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_deberta_v2 import DebertaV2Config\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"DebertaV2Config\"\n_TOKENIZER_FOR_DOC = \"DebertaV2Tokenizer\"\n_CHECKPOINT_FOR_DOC = \"microsoft/deberta-v2-xlarge\"\n\nDEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/deberta-v2-xlarge\",\n \"microsoft/deberta-v2-xxlarge\",\n \"microsoft/deberta-v2-xlarge-mnli\",\n \"microsoft/deberta-v2-xxlarge-mnli\",\n]\n\n\n# Copied from transformers.models.deberta.modeling_deberta.ContextPooler\nclass ContextPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)\n self.dropout = 
StableDropout(config.pooler_dropout)\n self.config = config\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n\n context_token = hidden_states[:, 0]\n context_token = self.dropout(context_token)\n pooled_output = self.dense(context_token)\n pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)\n return pooled_output\n\n @property\n def output_dim(self):\n return self.config.hidden_size\n\n\n# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2\nclass XSoftmax(torch.autograd.Function):\n \"\"\"\n Masked Softmax which is optimized for saving memory\n\n Args:\n input (`torch.tensor`): The input tensor that will apply softmax.\n mask (`torch.IntTensor`):\n The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n\n Example:\n\n ```python\n >>> import torch\n >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax\n\n >>> # Make a tensor\n >>> x = torch.randn([4, 20, 100])\n\n >>> # Create a mask\n >>> mask = (x > 0).int()\n\n >>> # Specify the dimension to apply softmax\n >>> dim = -1\n\n >>> y = XSoftmax.apply(x, mask, dim)\n ```\"\"\"\n\n @staticmethod\n def forward(self, input, mask, dim):\n self.dim = dim\n rmask = ~(mask.bool())\n\n output = input.masked_fill(rmask, float(\"-inf\"))\n output = torch.softmax(output, self.dim)\n output.masked_fill_(rmask, 0)\n self.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(self, grad_output):\n (output,) = self.saved_tensors\n inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)\n return inputGrad, None, None\n\n @staticmethod\n def symbolic(g, self, mask, dim):\n import torch.onnx.symbolic_helper as sym_help\n from torch.onnx.symbolic_opset9 import masked_fill, softmax\n\n mask_cast_value = g.op(\"Cast\", mask, 
to_i=sym_help.cast_pytorch_to_onnx[\"Long\"])\n r_mask = g.op(\n \"Cast\",\n g.op(\"Sub\", g.op(\"Constant\", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),\n to_i=sym_help.cast_pytorch_to_onnx[\"Int\"],\n )\n output = masked_fill(g, self, r_mask, g.op(\"Constant\", value_t=torch.tensor(float(\"-inf\"))))\n output = softmax(g, output, dim)\n return masked_fill(g, output, r_mask, g.op(\"Constant\", value_t=torch.tensor(0, dtype=torch.int)))\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DropoutContext\nclass DropoutContext(object):\n def __init__(self):\n self.dropout = 0\n self.mask = None\n self.scale = 1\n self.reuse_mask = True\n\n\n# Copied from transformers.models.deberta.modeling_deberta.get_mask\ndef get_mask(input, local_context):\n if not isinstance(local_context, DropoutContext):\n dropout = local_context\n mask = None\n else:\n dropout = local_context.dropout\n dropout *= local_context.scale\n mask = local_context.mask if local_context.reuse_mask else None\n\n if dropout > 0 and mask is None:\n mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()\n\n if isinstance(local_context, DropoutContext):\n if local_context.mask is None:\n local_context.mask = mask\n\n return mask, dropout\n\n\n# Copied from transformers.models.deberta.modeling_deberta.XDropout\nclass XDropout(torch.autograd.Function):\n \"\"\"Optimized dropout function to save computation and memory by using mask operation instead of multiplication.\"\"\"\n\n @staticmethod\n def forward(ctx, input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n ctx.scale = 1.0 / (1 - dropout)\n if dropout > 0:\n ctx.save_for_backward(mask)\n return input.masked_fill(mask, 0) * ctx.scale\n else:\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.scale > 1:\n (mask,) = ctx.saved_tensors\n return grad_output.masked_fill(mask, 0) * ctx.scale, None\n else:\n return grad_output, None\n\n\n# Copied from 
transformers.models.deberta.modeling_deberta.StableDropout\nclass StableDropout(nn.Module):\n \"\"\"\n Optimized dropout module for stabilizing the training\n\n Args:\n drop_prob (float): the dropout probabilities\n \"\"\"\n\n def __init__(self, drop_prob):\n super().__init__()\n self.drop_prob = drop_prob\n self.count = 0\n self.context_stack = None\n\n def forward(self, x):\n \"\"\"\n Call the module\n\n Args:\n x (`torch.tensor`): The input tensor to apply dropout\n \"\"\"\n if self.training and self.drop_prob > 0:\n return XDropout.apply(x, self.get_context())\n return x\n\n def clear_context(self):\n self.count = 0\n self.context_stack = None\n\n def init_context(self, reuse_mask=True, scale=1):\n if self.context_stack is None:\n self.context_stack = []\n self.count = 0\n for c in self.context_stack:\n c.reuse_mask = reuse_mask\n c.scale = scale\n\n def get_context(self):\n if self.context_stack is not None:\n if self.count >= len(self.context_stack):\n self.context_stack.append(DropoutContext())\n ctx = self.context_stack[self.count]\n ctx.dropout = self.drop_prob\n self.count += 1\n return ctx\n else:\n return self.drop_prob\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm\nclass DebertaV2SelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2\nclass DebertaV2Attention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = 
DisentangledSelfAttention(config)\n self.output = DebertaV2SelfOutput(config)\n self.config = config\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n self_output = self.self(\n hidden_states,\n attention_mask,\n output_attentions,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if output_attentions:\n self_output, att_matrix = self_output\n if query_states is None:\n query_states = hidden_states\n attention_output = self.output(self_output, query_states)\n\n if output_attentions:\n return (attention_output, att_matrix)\n else:\n return attention_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2\nclass DebertaV2Intermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm\nclass DebertaV2Output(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from 
transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2\nclass DebertaV2Layer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = DebertaV2Attention(config)\n self.intermediate = DebertaV2Intermediate(config)\n self.output = DebertaV2Output(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n output_attentions=False,\n ):\n attention_output = self.attention(\n hidden_states,\n attention_mask,\n output_attentions=output_attentions,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if output_attentions:\n attention_output, att_matrix = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if output_attentions:\n return (layer_output, att_matrix)\n else:\n return layer_output\n\n\nclass ConvLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n kernel_size = getattr(config, \"conv_kernel_size\", 3)\n groups = getattr(config, \"conv_groups\", 1)\n self.conv_act = getattr(config, \"conv_act\", \"tanh\")\n self.conv = nn.Conv1d(\n config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups\n )\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, residual_states, input_mask):\n out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()\n rmask = (1 - input_mask).bool()\n out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)\n out = ACT2FN[self.conv_act](self.dropout(out))\n\n layer_norm_input = residual_states + out\n output = self.LayerNorm(layer_norm_input).to(layer_norm_input)\n\n if input_mask is None:\n output_states = output\n else:\n if input_mask.dim() != 
layer_norm_input.dim():\n if input_mask.dim() == 4:\n input_mask = input_mask.squeeze(1).squeeze(1)\n input_mask = input_mask.unsqueeze(2)\n\n input_mask = input_mask.to(output.dtype)\n output_states = output * input_mask\n\n return output_states\n\n\nclass DebertaV2Encoder(nn.Module):\n \"\"\"Modified BertEncoder with relative position bias support\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])\n self.relative_attention = getattr(config, \"relative_attention\", False)\n\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n\n self.position_buckets = getattr(config, \"position_buckets\", -1)\n pos_ebd_size = self.max_relative_positions * 2\n\n if self.position_buckets > 0:\n pos_ebd_size = self.position_buckets * 2\n\n self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)\n\n self.norm_rel_ebd = [x.strip() for x in getattr(config, \"norm_rel_ebd\", \"none\").lower().split(\"|\")]\n\n if \"layer_norm\" in self.norm_rel_ebd:\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)\n\n self.conv = ConvLayer(config) if getattr(config, \"conv_kernel_size\", 0) > 0 else None\n self.gradient_checkpointing = False\n\n def get_rel_embedding(self):\n rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None\n if rel_embeddings is not None and (\"layer_norm\" in self.norm_rel_ebd):\n rel_embeddings = self.LayerNorm(rel_embeddings)\n return rel_embeddings\n\n def get_attention_mask(self, attention_mask):\n if attention_mask.dim() <= 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)\n attention_mask = attention_mask\n elif 
attention_mask.dim() == 3:\n attention_mask = attention_mask.unsqueeze(1)\n\n return attention_mask\n\n def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)\n relative_pos = build_relative_position(\n q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions\n )\n return relative_pos\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_hidden_states=True,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n return_dict=True,\n ):\n if attention_mask.dim() <= 2:\n input_mask = attention_mask\n else:\n input_mask = (attention_mask.sum(-2) > 0)\n attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[0]\n else:\n next_kv = hidden_states\n rel_embeddings = self.get_rel_embedding()\n output_states = next_kv\n for i, layer_module in enumerate(self.layer):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (output_states,)\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n output_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n next_kv,\n attention_mask,\n query_states,\n relative_pos,\n rel_embeddings,\n )\n else:\n output_states = layer_module(\n next_kv,\n attention_mask,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n output_attentions=output_attentions,\n )\n\n if output_attentions:\n output_states, att_m = output_states\n\n if i 
== 0 and self.conv is not None:\n output_states = self.conv(hidden_states, output_states, input_mask)\n\n if query_states is not None:\n query_states = output_states\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None\n else:\n next_kv = output_states\n\n if output_attentions:\n all_attentions = all_attentions + (att_m,)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (output_states,)\n\n if not return_dict:\n return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\ndef make_log_bucket_position(relative_pos, bucket_size, max_position):\n sign = np.sign(relative_pos)\n mid = bucket_size // 2\n abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos))\n log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid\n bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(np.int)\n return bucket_pos\n\n\ndef build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):\n \"\"\"\n Build relative position according to the query and key\n\n We assume the absolute position of query \\\\(P_q\\\\) is range from (0, query_size) and the absolute position of key\n \\\\(P_k\\\\) is range from (0, key_size), The relative positions from query to key is \\\\(R_{q \\\\rightarrow k} = P_q -\n P_k\\\\)\n\n Args:\n query_size (int): the length of query\n key_size (int): the length of key\n bucket_size (int): the size of position bucket\n max_position (int): the maximum allowed absolute position\n\n Return:\n `torch.LongTensor`: A tensor with shape [1, query_size, key_size]\n\n \"\"\"\n q_ids = np.arange(0, query_size)\n k_ids = np.arange(0, key_size)\n rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1))\n if bucket_size > 
0 and max_position > 0:\n rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = rel_pos_ids.unsqueeze(0)\n return rel_pos_ids\n\n\[email protected]\n# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])\n\n\[email protected]\n# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand\ndef p2c_dynamic_expand(c2p_pos, query_layer, key_layer):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])\n\n\[email protected]\n# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand\ndef pos_dynamic_expand(pos_index, p2c_att, key_layer):\n return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))\n\n\nclass DisentangledSelfAttention(nn.Module):\n \"\"\"\n Disentangled self-attention module\n\n Parameters:\n config (`DebertaV2Config`):\n A model config class instance with the configuration to build a new model. 
The schema is similar to\n *BertConfig*, for more details, please refer [`DebertaV2Config`]\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = config.num_attention_heads\n _attention_head_size = config.hidden_size // config.num_attention_heads\n self.attention_head_size = getattr(config, \"attention_head_size\", _attention_head_size)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n\n self.share_att_key = getattr(config, \"share_att_key\", False)\n self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []\n self.relative_attention = getattr(config, \"relative_attention\", False)\n\n if self.relative_attention:\n self.position_buckets = getattr(config, \"position_buckets\", -1)\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.pos_ebd_size = self.max_relative_positions\n if self.position_buckets > 0:\n self.pos_ebd_size = self.position_buckets\n\n self.pos_dropout = StableDropout(config.hidden_dropout_prob)\n\n if not self.share_att_key:\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = 
StableDropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x, attention_heads):\n new_x_shape = x.size()[:-1] + (attention_heads, -1)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n \"\"\"\n Call the module\n\n Args:\n hidden_states (`torch.FloatTensor`):\n Input states to the module usually the output from previous layer, it will be the Q,K and V in\n *Attention(Q,K,V)*\n\n attention_mask (`torch.ByteTensor`):\n An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum\n sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*\n th token.\n\n output_attentions (`bool`, optional):\n Whether return the attention matrix.\n\n query_states (`torch.FloatTensor`, optional):\n The *Q* state in *Attention(Q,K,V)*.\n\n relative_pos (`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with\n values ranging in [*-max_relative_positions*, *max_relative_positions*].\n\n rel_embeddings (`torch.FloatTensor`):\n The embedding of relative distances. 
It's a tensor of shape [\\\\(2 \\\\times\n \\\\text{max_relative_positions}\\\\), *hidden_size*].\n\n\n \"\"\"\n if query_states is None:\n query_states = hidden_states\n query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)\n key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)\n value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)\n\n rel_att = None\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n scale_factor = 1\n if \"c2p\" in self.pos_att_type:\n scale_factor += 1\n if \"p2c\" in self.pos_att_type:\n scale_factor += 1\n if \"p2p\" in self.pos_att_type:\n scale_factor += 1\n scale = math.sqrt(query_layer.size(-1) * scale_factor)\n attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale\n if self.relative_attention:\n rel_embeddings = self.pos_dropout(rel_embeddings)\n rel_att = self.disentangled_attention_bias(\n query_layer, key_layer, relative_pos, rel_embeddings, scale_factor\n )\n\n if rel_att is not None:\n attention_scores = attention_scores + rel_att\n attention_scores = attention_scores\n attention_scores = attention_scores.view(\n -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)\n )\n\n # bsz x height x length x dimension\n attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)\n attention_probs = self.dropout(attention_probs)\n context_layer = torch.bmm(\n attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer\n )\n context_layer = (\n context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))\n .permute(0, 2, 1, 3)\n .contiguous()\n )\n new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n context_layer = context_layer.view(*new_context_layer_shape)\n if output_attentions:\n return (context_layer, attention_probs)\n else:\n return 
context_layer\n\n def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):\n if relative_pos is None:\n q = query_layer.size(-2)\n relative_pos = build_relative_position(\n q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions\n )\n if relative_pos.dim() == 2:\n relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)\n elif relative_pos.dim() == 3:\n relative_pos = relative_pos.unsqueeze(1)\n # bsz x height x query x key\n elif relative_pos.dim() != 4:\n raise ValueError(f\"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}\")\n\n att_span = self.pos_ebd_size\n relative_pos = relative_pos.long().to(query_layer.device)\n\n rel_embeddings = rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :].unsqueeze(0)\n if self.share_att_key:\n pos_query_layer = self.transpose_for_scores(\n self.query_proj(rel_embeddings), self.num_attention_heads\n ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)\n pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)\n else:\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_key_layer = self.transpose_for_scores(\n self.pos_key_proj(rel_embeddings), self.num_attention_heads\n ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer = self.transpose_for_scores(\n self.pos_query_proj(rel_embeddings), self.num_attention_heads\n ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1)\n \n score = 0\n # content->position\n if \"c2p\" in self.pos_att_type:\n scale = math.sqrt(pos_key_layer.size(-1) * scale_factor)\n c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))\n c2p_pos = 
torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)\n c2p_att = torch.gather(\n c2p_att,\n dim=-1,\n index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),\n )\n score += c2p_att / scale\n\n # position->content\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n scale = math.sqrt(pos_query_layer.size(-1) * scale_factor)\n if key_layer.size(-2) != query_layer.size(-2):\n r_pos = build_relative_position(\n key_layer.size(-2),\n key_layer.size(-2),\n bucket_size=self.position_buckets,\n max_position=self.max_relative_positions,\n ).to(query_layer.device)\n r_pos = r_pos.unsqueeze(0)\n else:\n r_pos = relative_pos\n\n p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)\n\n if \"p2c\" in self.pos_att_type:\n p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))\n p2c_att = torch.gather(\n p2c_att,\n dim=-1,\n index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),\n ).transpose(-1, -2)\n score += p2c_att / scale\n\n # position->position\n if \"p2p\" in self.pos_att_type:\n pos_query = pos_query_layer[:, :, att_span:, :]\n p2p_att = torch.matmul(pos_query, pos_key_layer.transpose(-1, -2))\n p2p_att = p2p_att.expand(query_layer.size()[:2] + p2p_att.size()[2:])\n p2p_att = torch.gather(\n p2p_att,\n dim=-1,\n index=c2p_pos.expand(\n [query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]\n ),\n )\n score += p2p_att\n\n return score\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm\nclass DebertaV2Embeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n pad_token_id = getattr(config, \"pad_token_id\", 0)\n self.embedding_size = getattr(config, \"embedding_size\", config.hidden_size)\n self.word_embeddings = nn.Embedding(config.vocab_size, 
self.embedding_size, padding_idx=pad_token_id)\n\n self.position_biased_input = getattr(config, \"position_biased_input\", True)\n if not self.position_biased_input:\n self.position_embeddings = None\n else:\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)\n\n if config.type_vocab_size > 0:\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)\n\n if self.embedding_size != config.hidden_size:\n self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.position_embeddings is not None:\n position_embeddings = self.position_embeddings(position_ids.long())\n else:\n position_embeddings = torch.zeros_like(inputs_embeds)\n\n embeddings = inputs_embeds\n if self.position_biased_input:\n embeddings += position_embeddings\n if self.config.type_vocab_size > 0:\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings += token_type_embeddings\n\n if self.embedding_size != self.config.hidden_size:\n embeddings = self.embed_proj(embeddings)\n\n embeddings = 
self.LayerNorm(embeddings)\n\n if mask is not None:\n if mask.dim() != embeddings.dim():\n if mask.dim() == 4:\n mask = mask.squeeze(1).squeeze(1)\n mask = mask.unsqueeze(2)\n mask = mask.to(embeddings.dtype)\n\n embeddings = embeddings * mask\n\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2\nclass DebertaV2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = DebertaV2Config\n base_model_prefix = \"deberta\"\n _keys_to_ignore_on_load_missing = [\"position_ids\"]\n _keys_to_ignore_on_load_unexpected = [\"position_embeddings\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, DebertaV2Encoder):\n module.gradient_checkpointing = value\n\n\nDEBERTA_START_DOCSTRING = r\"\"\"\n The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled\n Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build\n on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. 
With those two\n improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.```\n\n\n Parameters:\n config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nDEBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`DebertaV2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2\nclass DebertaV2Model(DebertaV2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = DebertaV2Embeddings(config)\n self.encoder = DebertaV2Encoder(config)\n self.z_steps = 0\n self.config = config\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError(\"The prune function is not implemented in DeBERTa model.\")\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n mask=attention_mask,\n inputs_embeds=inputs_embeds,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask,\n output_hidden_states=True,\n 
output_attentions=output_attentions,\n return_dict=return_dict,\n )\n encoded_layers = encoder_outputs[1]\n\n if self.z_steps > 1:\n hidden_states = encoded_layers[-2]\n layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]\n query_states = encoded_layers[-1]\n rel_embeddings = self.encoder.get_rel_embedding()\n attention_mask = self.encoder.get_attention_mask(attention_mask)\n rel_pos = self.encoder.get_rel_pos(embedding_output)\n for layer in layers[1:]:\n query_states = layer(\n hidden_states,\n attention_mask,\n output_attentions=False,\n query_states=query_states,\n relative_pos=rel_pos,\n rel_embeddings=rel_embeddings,\n )\n encoded_layers.append(query_states)\n\n sequence_output = encoded_layers[-1]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]\n \n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"DeBERTa Model with a `language modeling` head on top.\"\"\", DEBERTA_START_DOCSTRING)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM with Deberta->DebertaV2\nclass DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.deberta = DebertaV2Model(config)\n self.cls = DebertaV2OnlyMLMHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n 
processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta\nclass DebertaV2PredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if 
isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta\nclass DebertaV2LMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = DebertaV2PredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta\nclass DebertaV2OnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = DebertaV2LMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification with Deberta->DebertaV2\nclass DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n num_labels = getattr(config, \"num_labels\", 2)\n self.num_labels = num_labels\n\n self.deberta = DebertaV2Model(config)\n self.pooler = ContextPooler(config)\n output_dim = self.pooler.output_dim\n\n self.classifier = nn.Linear(output_dim, num_labels)\n drop_out = getattr(config, \"cls_dropout\", None)\n drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out\n self.dropout = StableDropout(drop_out)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.deberta.get_input_embeddings()\n\n def set_input_embeddings(self, new_embeddings):\n self.deberta.set_input_embeddings(new_embeddings)\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n encoder_layer = outputs[0]\n pooled_output = self.pooler(encoder_layer)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n # regression task\n loss_fn = nn.MSELoss()\n logits = logits.view(-1).to(labels.dtype)\n loss = loss_fn(logits, labels.view(-1))\n elif labels.dim() == 1 or labels.size(-1) == 1:\n label_index = (labels >= 0).nonzero()\n labels = labels.long()\n if label_index.size(0) > 0:\n labeled_logits = torch.gather(\n logits, 0, label_index.expand(label_index.size(0), logits.size(1))\n )\n labels = torch.gather(labels, 0, label_index.view(-1))\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))\n else:\n loss = torch.tensor(0).to(logits)\n else:\n log_softmax = nn.LogSoftmax(-1)\n loss = -((log_softmax(logits) * labels).sum(-1)).mean()\n elif self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, 
labels)\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2\nclass DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaV2Model(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. 
Indices should be in `[0, ..., config.num_labels - 1]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering with Deberta->DebertaV2\nclass DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaV2Model(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n 
output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are 
outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.range",
"tensorflow.zeros",
"tensorflow.keras.metrics.sparse_categorical_crossentropy",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.debugging.assert_near",
"tensorflow.function",
"tensorflow.math.reduce_sum",
"tensorflow.where",
"tensorflow.random.set_seed"
],
[
"torch.zeros",
"torch.onnx.symbolic_opset9.softmax",
"torch.nn.Embedding",
"torch._softmax_backward_data",
"torch.nn.BCEWithLogitsLoss",
"numpy.where",
"torch.softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"numpy.arange",
"torch.tensor",
"torch.arange",
"numpy.log",
"torch.nn.LogSoftmax",
"torch.empty_like",
"torch.zeros_like",
"torch.nn.Linear",
"torch.nn.Conv1d",
"numpy.abs",
"numpy.tile",
"torch.nn.LayerNorm",
"numpy.sign",
"torch.clamp",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SantiagoJN/spatialaudiogen | [
"5092b8988731f9704914beb44c5688a819508ade"
] | [
"pyutils/iolib/audio.py"
] | [
"import os\nimport scipy.signal\nimport numpy as np\nfrom soundfile import SoundFile\nfrom pyutils.iolib.video import getFFprobeMeta\nfrom pyutils.cmd import runSystemCMD\n# from scikits.audiolab import Sndfile, Format\nimport tempfile\nimport resampy\n# import librosa\n\n\ndef load_wav(fname, rate=None):\n # fp = Sndfile(fname, 'r')\n fp = SoundFile(fname, 'r')\n #_signal = fp.read_frames(fp.nframes)\n _signal = fp.buffer_read(dtype=\"int32\")\n _signal = np.asarray(_signal).reshape((-1, fp.channels))\n _rate = fp.samplerate\n\n if _signal.ndim == 1:\n _signal.reshape((-1, 1))\n if rate is not None and rate != _rate:\n # _num_frames = _signal.shape[0]\n # _duration = _num_frames / float(_rate)\n # signal = scipy.signal.resample(_signal, int(rate * _duration))\n signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_fast')\n else:\n signal = _signal\n rate = _rate\n\n return signal, rate\n\ndef save_wav(fname, signal, rate):\n fp = SoundFile(fname, 'w', rate, signal.shape[1])\n #fp.write(fname, signal, rate)\n #print(f'########################fp: {fp}')\n fp.write(signal)\n # with SoundFile(fname, 'w', rate, signal.shape[1], 'PCM_24') as f:\n # f.write(signal)\n #fp.close()\n\n # Intento 3\n # y, sr = librosa.load(librosa.util.example_audio_file(), duration=5.0)\n # librosa.output.write_wav(fname, signal, rate)\n # fp = SoundFile(fname, 'w', rate, signal.shape[1])\n # # d, sr = fp.read()\n # fp.write(signal)\n\n # Intento 4\n \n\n\ndef convert2wav(inp_fn, out_fn, rate=None):\n cmd = ['ffmpeg', '-y',\n '-i', inp_fn,\n '-map', '0:a',\n '-acodec', 'pcm_s16le']\n if rate is not None:\n cmd += ['-ar', str(rate),]\n cmd += [out_fn]\n\n stdout, stderr = runSystemCMD(' '.join(cmd))\n if any([l.startswith('Output file is empty,')\n for l in stderr.split('\\n')]):\n raise (ValueError, 'Output file is empty.\\n' + stderr)\n\n\nclass AudioReader:\n def __init__(self, fn, rate=None, pad_start=0, seek=None, duration=None, rotation=None):\n fp = Sndfile(fn, 'r') 
if fn.endswith('.wav') else None\n if fp is None or (rate is not None and fp.samplerate != rate):\n # Convert to wav file\n if not os.path.isdir('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/'):\n os.makedirs('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/')\n snd_file = tempfile.NamedTemporaryFile('w', prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.wav', delete=False)\n snd_file.close()\n\n convert2wav(fn, snd_file.name, rate)\n self.snd_fn = snd_file.name\n self.rm_flag = True\n\n else:\n self.snd_fn = fn\n self.rm_flag = False\n\n self.fp = Sndfile(self.snd_fn, 'r')\n self.num_channels = self.fp.channels\n self.rate = self.fp.samplerate\n self.num_frames = self.fp.nframes\n self.duration = self.num_frames / float(self.rate)\n\n self.k = 0\n self.pad = pad_start\n\n if seek is not None and seek > 0:\n num_frames = int(seek * self.rate)\n self.fp.read_frames(num_frames)\n else:\n seek = 0\n\n if duration is not None:\n self.duration = min(duration, self.duration-seek)\n self.num_frames = int(self.duration * self.rate)\n\n if rotation is not None:\n assert self.num_channels > 2 # Spatial audio\n assert -np.pi <= rotation < np.pi\n c = np.cos(rotation)\n s = np.sin(rotation)\n rot_mtx = np.array([[1, 0, 0, 0], # W' = W\n [0, c, 0, s], # Y' = X sin + Y cos\n [0, 0, 1, 0], # Z' = Z\n [0, -s, 0, c]]) # X' = X cos - Y sin\n self.rot_mtx = rot_mtx\n else:\n self.rot_mtx = None\n\n def __del__(self):\n if self.rm_flag:\n os.remove(self.snd_fn)\n\n def get_chunk(self, n=1, force_size=False):\n if self.k >= self.num_frames:\n return None\n\n frames_left = self.num_frames - self.k\n if force_size and n > frames_left:\n return None\n\n # Pad zeros to start\n if self.pad > 0:\n pad_size = min(n, self.pad)\n pad_chunk = np.zeros((pad_size, self.num_channels))\n n -= pad_size\n self.pad -= pad_size\n else:\n pad_chunk = None\n\n # Read frames\n chunk_size = min(n, frames_left)\n chunk = 
self.fp.read_frames(chunk_size)\n chunk = chunk.reshape((chunk.shape[0], self.num_channels))\n self.k += chunk_size\n\n if pad_chunk is not None:\n chunk = np.concatenate((pad_chunk.astype(chunk.dtype), chunk), 0)\n\n if self.rot_mtx is not None:\n chunk = np.dot(chunk, self.rot_mtx.T)\n\n return chunk\n\n def loop_chunks(self, n=1, force_size=False):\n while True:\n chunk = self.get_chunk(n, force_size=False)\n if chunk is None:\n break\n yield chunk\n\nclass AudioReader2:\n def __init__(self, audio_folder, rate=None,\n seek=0, duration=None, rotation=None):\n self.audio_folder = audio_folder\n\n fns = os.listdir(audio_folder)\n self.num_files = len(fns)\n\n # fp = Sndfile(os.path.join(self.audio_folder, fns[0]), 'r')\n fp = SoundFile(os.path.join(self.audio_folder, fns[0]), 'r')\n data, fps = load_wav(os.path.join(self.audio_folder, fns[0]))\n self.rate = float(fp.samplerate) if rate is not None else fps\n self.num_channels = fp.channels\n self.duration = self.num_files\n self.num_frames = int(self.duration * rate)\n\n self.cur_frame = int(seek * self.rate)\n self.time = self.cur_frame / self.rate\n\n self.max_time = self.duration\n if duration is not None:\n self.max_time = min(seek + duration, self.max_time)\n\n if rotation is not None:\n assert self.num_channels > 2 # Spatial audio\n assert -np.pi <= rotation < np.pi\n c = np.cos(rotation)\n s = np.sin(rotation)\n rot_mtx = np.array([[1, 0, 0, 0], # W' = W\n [0, c, 0, s], # Y' = X sin + Y cos\n [0, 0, 1, 0], # Z' = Z\n [0, -s, 0, c]]) # X' = X cos - Y sin\n self.rot_mtx = rot_mtx\n else:\n self.rot_mtx = None\n\n def get(self, start_time, size):\n index = range(int(start_time), int(start_time + size / self.rate) + 1)\n fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))\n for i in index]\n chunk = []\n for fn in fns:\n if not os.path.exists(fn):\n return None\n data, _ = load_wav(fn, self.rate)\n chunk.append(data)\n\n chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]\n ss = 
int((start_time - int(start_time)) * self.rate)\n chunk = chunk[ss:ss+size, :]\n\n return chunk\n\n def get_chunk(self, n=1, force_size=False):\n if self.time >= self.max_time:\n return None\n\n frames_left = int((self.max_time - self.time) * self.rate)\n if force_size and n > frames_left:\n return None\n\n # Read frames\n chunk_size = min(n, frames_left)\n start_time = self.cur_frame / self.rate\n end_frame_no = self.cur_frame + chunk_size - 1\n end_time = end_frame_no / self.rate\n\n index = range(int(start_time), int(end_time) + 1)\n fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))\n for i in index]\n chunk = []\n for fn in fns:\n data, _ = load_wav(fn, self.rate)\n chunk.append(data)\n chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]\n ss = int((self.time - int(self.time)) * self.rate)\n chunk = chunk[ss:ss+chunk_size, :]\n self.cur_frame += chunk.shape[0]\n self.time = self.cur_frame / self.rate\n\n if self.rot_mtx is not None:\n chunk = np.dot(chunk, self.rot_mtx.T)\n\n return chunk\n\n def loop_chunks(self, n=1, force_size=False):\n while True:\n chunk = self.get_chunk(n, force_size=False)\n if chunk is None:\n break\n yield chunk\n\n\ndef test_audio_reader():\n reader = AudioReader2('/gpu2_data/morgado/spatialaudiogen/youtube/train/687gkvLi5kI/ambix',\n rate=10000, seek=0, duration=5.5)\n for s in reader.loop_chunks(10000):\n print(s.shape), s.max(), s.min()\n# test_audio_reader()\n\n"
] | [
[
"numpy.dot",
"numpy.asarray",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mruffalo/epiScanpy | [
"bcb86347d2b8451c384f97162625c8d5efb27ffc"
] | [
"episcanpy/preprocessing/_readimpute.py"
] | [
"import numpy as np\nimport anndata as ad\nimport pandas as pd\n\ndef load_met_noimput(matrix_file, path='', save=False):\n \"\"\"\n read the raw count matrix and convert it into an AnnData object.\n write down the matrix as .h5ad (AnnData object) if save = True.\n Return AnnData object\n \"\"\"\n matrix = []\n cell_names = []\n feature_names = []\n with open(path+matrix_file) as f:\n line = f.readline()[:-2].split('\\t')\n if line[0] == 'sample_name':\n feature_names = line[1:]\n else:\n matrix.append(line[1:])\n cell_names.append(line[0])\n if matrix == []:\n line = f.readline()[:-2].split('\\t')\n matrix.append(line[1:])\n cell_names.append(line[0])\n for line in f:\n line = line[:-2].split('\\t')\n matrix.append(line[1:])\n cell_names.append(line[0])\n\n matrix = np.array(matrix)\n \n if feature_names != []:\n adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names), var=pd.DataFrame(index=feature_names))\n else:\n adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names))\n \n adata.uns['omic'] = 'methylation'\n adata.uns['imputation'] = 'no_imputation'\n \n if save:\n adata.write(\"\".join([\".\".split(matrix_file)[0],'.h5ad']))\n \n return(adata)\n\ndef imputation_met(adata, number_cell_covered=10, imputation_value='mean', save=None, copy=False):\n \"\"\"\n Impute missing values in methyaltion level matrices. The imputsation is based on the average\n methylation value of the given variable.\n It also filter out variables that are covered in an unsufficient number of cells in order to \n reduce the feature space to meaningful variables and discard potential coverage biases. 
\n\n Parameters\n ----------\n adata: AnnData object containing 'nan'\n\n number_cell_covered: minimum number of cells to be covered in order to retain a variable\n\n imputation_value: imputation of the missing value can be made either on the mean or the median\n\n Return\n ------\n Return a new AnnData object\n\n \n \n \"\"\"\n\n # This step need to be sped up and could be multithread.\n # Only the mean available for now. And only the minimum number of cells covered and not the variety of the \n # methylation levels\n # also, it odes not return the variable annoations and force to add 2 values\n old_features = adata.var_names.tolist()\n \n new_matrix = []\n new_features_name = []\n means = []\n medians = []\n feat_nb = 0\n\n length1 = len(adata.X[0,:])\n length2 = len(adata.X[:,0])\n adata.obs['coverage_cells'] = [length1 - np.isnan(line).sum() for line in adata.X]\n adata.obs['mean_cell_methylation'] = [np.nansum(line)/length1 for line in adata.X]\n adata.var['coverage_feature'] = [length2 - np.isnan(line).sum() for line in adata.X.T]\n adata.var['mean_feature_methylation'] = [np.nansum(line)/length2 for line in adata.X.T]\n\n adata2 = adata[:, adata.var['coverage_feature']>=number_cell_covered].copy()\n\n for index in range(len(adata2.var_names.tolist())):\n adata2.X[:,index] = np.nan_to_num(adata2.X[:,index], nan=adata2.var['mean_feature_methylation'][index])\n\n\n if save!= None:\n adata2.write(save.rstrip('.h5ad')+'.h5ad')\n if copy==False:\n adata = adata2.copy()\n else:\n return(adata2)\n\n\n\n\ndef readandimputematrix(file_name, min_coverage=1):\n \"\"\"\n Temporary function to load and impute methyaltion count matrix into an AnnData object\n \n Parameters\n ----------\n file_name : file name to read and load\n \n min_coverage : minimum number of cells covered for which we keep and impute a variable\n \n Returns\n -------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n \n \"\"\"\n with open(file_name) as f:\n file = f.readlines()\n\n # 
separate annotation from data \n head_var = file[0]\n head_var = head_var.split('\\t')\n # Then, extract the sample names\n sample_names = []\n data_raw = []\n for l in file[1:]:\n l = l.split('\\t')\n sample_names.append(l[0])\n data_raw.append(l[1:])\n\n # clear memory of useless variables \n del file\n \n ##########################################\n # now, removing empty columns\n empties = []\n partial = []\n full = []\n for index in range(1, len(data_raw[0])):\n column = [element[index] for element in data_raw]\n if len(list(set(column))) == 1:\n empties.append(index)\n elif len(list(set(column))) <= min_coverage:\n partial.append(index)\n else:\n full.append(index)\n \n ##########################################\n intermed_matrix = []\n name_windows_covered = []\n # let's remove the compltetly uninformative columns\n for index in range(1, len(head_var[1:])):\n if index in full:\n intermed_matrix.append([element[index] for element in data_raw])\n name_windows_covered.append(head_var[index])\n\n ########################################\n # imputing values.\n imputed_matrix = []\n for row in intermed_matrix:\n imputed_row = []\n if \"nan\" in row:\n mean = np.mean([float(e) for e in row if e != \"nan\"])\n for element in row:\n if element == \"nan\":\n imputed_row.append(str(mean))\n else: \n imputed_row.append(element)\n imputed_matrix.append(imputed_row)\n else:\n imputed_matrix.append(row)\n\n imputed_matrix = np.matrix(imputed_matrix).transpose()\n return(ad.AnnData(imputed_matrix, obs=pd.DataFrame(index=sample_names), var=pd.DataFrame(index=name_windows_covered)))\n #return(imputed_matrix, sample_names, name_windows_covered)\n"
] | [
[
"numpy.matrix",
"numpy.isnan",
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.nansum",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
yourwanghao/Ultrasound_Nerve_Segmentation | [
"9a73cdb9a97b27c375a1023f4426d7e5a89b6a4d"
] | [
"training_curves.py"
] | [
"#!/usr/bin/env python\n\n## based on https://github.com/dmlc/mxnet/issues/1302\n## Parses the model fit log file and generates a train/val vs epoch plot\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport re\nimport argparse\n\ndef log_train_metric(period, auto_reset=False):\n \"\"\"Callback to log the training evaluation result every period.\n\n Parameters\n ----------\n period : int\n The number of batch to log the training evaluation metric.\n auto_reset : bool\n Reset the metric after each log\n\n Returns\n -------\n callback : function\n The callback function that can be passed as iter_epoch_callback to fit.\n \"\"\"\n def _callback(param):\n \"\"\"The checkpoint function.\"\"\"\n if param.nbatch % period == 0 and param.eval_metric is not None:\n name_value = param.eval_metric.get_name_value()\n for name, value in name_value:\n logging.info('Iter[%d] Batch[%d] Train-%s=%f',\n param.epoch, param.nbatch, name, value)\n if auto_reset:\n param.eval_metric.reset()\n return _callback\n\nparser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')\nparser.add_argument('--log-file', type=str,default=\"log_tr_va\",\n help='the path of log file')\nargs = parser.parse_args()\n\nprint('ok')\n\n\nTR_RE = re.compile('\\s+Train-dicecoef=([\\d\\.]+)')\nVA_RE = re.compile('.*?]\\sValidation-dicecoef=([\\d\\.]+)')\n\nlog = open(args.log_file).read()\n\nlog_tr = [float(x) for x in TR_RE.findall(log)]\nlog_va = [float(x) for x in VA_RE.findall(log)]\nidx = np.arange(len(log_tr))\n\nprint(len(log_tr), len(log_va))\n\n\nplt.figure(figsize=(8, 6))\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Accuracy\")\nplt.plot(idx, log_tr, 'o', linestyle='-', color=\"r\",\n label=\"Train dicecoef\")\n\nplt.plot(idx, log_va, 'o', linestyle='-', color=\"b\",\n label=\"Validation dicecoef\")\n\nplt.legend(loc=\"best\")\nplt.xticks(np.arange(min(idx), max(idx)+1, 5))\nplt.yticks(np.arange(0, 1, 0.2))\nplt.ylim([0,1])\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rxbtz/tensorflow | [
"499f7ed810928e29986453c83778f71e2b351eb5"
] | [
"tensorflow/python/keras/_impl/keras/layers/embeddings.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Embedding layer.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import constraints\nfrom tensorflow.python.keras._impl.keras import initializers\nfrom tensorflow.python.keras._impl.keras import regularizers\nfrom tensorflow.python.keras._impl.keras.engine import Layer\nfrom tensorflow.python.keras._impl.keras.utils import tf_utils\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('keras.layers.Embedding')\nclass Embedding(Layer):\n \"\"\"Turns positive integers (indexes) into dense vectors of fixed size.\n\n eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n\n This layer can only be used as the first layer in a model.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Embedding(1000, 64, input_length=10))\n # the model will take as input an integer matrix of size (batch,\n input_length).\n # the largest integer (i.e. 
word index) in the input should be no larger\n than 999 (vocabulary size).\n # now model.output_shape == (None, 10, 64), where None is the batch\n dimension.\n\n input_array = np.random.randint(1000, size=(32, 10))\n\n model.compile('rmsprop', 'mse')\n output_array = model.predict(input_array)\n assert output_array.shape == (32, 10, 64)\n ```\n\n Arguments:\n input_dim: int > 0. Size of the vocabulary,\n i.e. maximum integer index + 1.\n output_dim: int >= 0. Dimension of the dense embedding.\n embeddings_initializer: Initializer for the `embeddings` matrix.\n embeddings_regularizer: Regularizer function applied to\n the `embeddings` matrix.\n embeddings_constraint: Constraint function applied to\n the `embeddings` matrix.\n mask_zero: Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n This is useful when using recurrent layers\n which may take variable length input.\n If this is `True` then all subsequent layers\n in the model need to support masking or an exception will be raised.\n If mask_zero is set to True, as a consequence, index 0 cannot be\n used in the vocabulary (input_dim should equal size of\n vocabulary + 1).\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n\n Input shape:\n 2D tensor with shape: `(batch_size, sequence_length)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, sequence_length, output_dim)`.\n\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n embeddings_initializer='uniform',\n embeddings_regularizer=None,\n activity_regularizer=None,\n embeddings_constraint=None,\n mask_zero=False,\n input_length=None,\n **kwargs):\n if 'input_shape' not in kwargs:\n if input_length:\n kwargs['input_shape'] = (input_length,)\n else:\n kwargs['input_shape'] = (None,)\n dtype = kwargs.pop('dtype', K.floatx())\n 
super(Embedding, self).__init__(dtype=dtype, **kwargs)\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.embeddings_initializer = initializers.get(embeddings_initializer)\n self.embeddings_regularizer = regularizers.get(embeddings_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.embeddings_constraint = constraints.get(embeddings_constraint)\n self.mask_zero = mask_zero\n self.input_length = input_length\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n self.embeddings = self.add_weight(\n shape=(self.input_dim, self.output_dim),\n initializer=self.embeddings_initializer,\n name='embeddings',\n regularizer=self.embeddings_regularizer,\n constraint=self.embeddings_constraint)\n self.built = True\n\n def compute_mask(self, inputs, mask=None):\n if not self.mask_zero:\n return None\n else:\n return math_ops.not_equal(inputs, 0)\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self.input_length is None:\n return input_shape + (self.output_dim,)\n else:\n # input_length can be tuple if input is 3D or higher\n if isinstance(self.input_length, (list, tuple)):\n in_lens = list(self.input_length)\n else:\n in_lens = [self.input_length]\n if len(in_lens) != len(input_shape) - 1:\n ValueError('\"input_length\" is %s, but received input has shape %s' %\n (str(self.input_length), str(input_shape)))\n else:\n for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n if s1 is not None and s2 is not None and s1 != s2:\n ValueError('\"input_length\" is %s, but received input has shape %s' %\n (str(self.input_length), str(input_shape)))\n elif s1 is None:\n in_lens[i] = s2\n return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n\n def call(self, inputs):\n dtype = K.dtype(inputs)\n if dtype != 'int32' and dtype != 'int64':\n inputs = math_ops.cast(inputs, 'int32')\n out = embedding_ops.embedding_lookup(self.embeddings, inputs)\n return out\n\n def 
get_config(self):\n config = {\n 'input_dim':\n self.input_dim,\n 'output_dim':\n self.output_dim,\n 'embeddings_initializer':\n initializers.serialize(self.embeddings_initializer),\n 'embeddings_regularizer':\n regularizers.serialize(self.embeddings_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'embeddings_constraint':\n constraints.serialize(self.embeddings_constraint),\n 'mask_zero':\n self.mask_zero,\n 'input_length':\n self.input_length\n }\n base_config = super(Embedding, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"tensorflow.python.keras._impl.keras.constraints.serialize",
"tensorflow.python.keras._impl.keras.backend.floatx",
"tensorflow.python.keras._impl.keras.regularizers.serialize",
"tensorflow.python.keras._impl.keras.initializers.get",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.keras._impl.keras.backend.dtype",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.keras._impl.keras.initializers.serialize",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.keras._impl.keras.regularizers.get",
"tensorflow.python.keras._impl.keras.constraints.get"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
martinRenou/chaco | [
"1888da3ecee89f9b2d11900cda9333b32fc5e89a",
"1888da3ecee89f9b2d11900cda9333b32fc5e89a",
"1888da3ecee89f9b2d11900cda9333b32fc5e89a"
] | [
"chaco/tests/test_grid_data_source.py",
"chaco/contour_line_plot.py",
"chaco/candle_plot.py"
] | [
"\"\"\"\nTests of GridDataSource behavior.\n\"\"\"\n\nimport unittest\n\nfrom numpy import array\nfrom numpy.testing import assert_array_equal\n\nfrom chaco.api import GridDataSource\nfrom traits.testing.unittest_tools import UnittestTools\n\n\nclass GridDataSourceTestCase(UnittestTools, unittest.TestCase):\n\n def setUp(self):\n self.data_source = GridDataSource(\n xdata=array([1, 2, 3]),\n ydata=array([1.5, 0.5, -0.5, -1.5]),\n sort_order=('ascending', 'descending'))\n\n def test_empty(self):\n data_source = GridDataSource()\n self.assertEqual(data_source.sort_order, ('none', 'none'))\n self.assertEqual(data_source.index_dimension, 'image')\n self.assertEqual(data_source.value_dimension, 'scalar')\n self.assertEqual(data_source.metadata,\n {\"selections\":[], \"annotations\":[]})\n xdata, ydata = data_source.get_data()\n assert_array_equal(xdata.get_data(), array([]))\n assert_array_equal(ydata.get_data(), array([]))\n self.assertEqual(data_source.get_bounds(), ((0,0),(0,0)))\n\n def test_init(self):\n test_xd = array([1, 2, 3])\n test_yd = array([1.5, 0.5, -0.5, -1.5])\n test_sort_order = ('ascending', 'descending')\n\n self.assertEqual(self.data_source.sort_order, test_sort_order)\n xd, yd = self.data_source.get_data()\n assert_array_equal(xd.get_data(), test_xd)\n assert_array_equal(yd.get_data(), test_yd)\n self.assertEqual(self.data_source.get_bounds(),\n ((min(test_xd),min(test_yd)),\n (max(test_xd),max(test_yd))))\n\n def test_set_data(self):\n\n test_xd = array([0,2,4])\n test_yd = array([0,1,2,3,4,5])\n test_sort_order = ('none', 'none')\n\n self.data_source.set_data(xdata=test_xd, ydata=test_yd,\n sort_order=('none', 'none'))\n\n self.assertEqual(self.data_source.sort_order, test_sort_order)\n xd, yd = self.data_source.get_data()\n assert_array_equal(xd.get_data(), test_xd)\n assert_array_equal(yd.get_data(), test_yd)\n self.assertEqual(self.data_source.get_bounds(),\n ((min(test_xd),min(test_yd)),\n (max(test_xd),max(test_yd))))\n\n def 
test_metadata(self):\n self.assertEqual(self.data_source.metadata,\n {'annotations': [], 'selections': []})\n\n def test_metadata_changed(self):\n with self.assertTraitChanges(self.data_source, 'metadata_changed', count=1):\n self.data_source.metadata = {'new_metadata': True}\n\n def test_metadata_items_changed(self):\n with self.assertTraitChanges(self.data_source, 'metadata_changed', count=1):\n self.data_source.metadata['new_metadata'] = True\n",
"\"\"\" Defines the ContourLinePlot class.\n\"\"\"\n\nfrom __future__ import with_statement\n\n# Major library imports\nfrom numpy import array, isfinite, meshgrid, transpose\n\n# Enthought library imports\nfrom enable.api import LineStyle\nfrom kiva import constants\nfrom traits.api import Bool, Dict, Float, List, Str, Trait\n\n# Local relative imports\nfrom .base_contour_plot import BaseContourPlot\nfrom .contour.contour import Cntr\n\n\nclass ContourLinePlot(BaseContourPlot):\n \"\"\" Takes a value data object whose elements are scalars, and renders them\n as a contour plot.\n \"\"\"\n\n # TODO: Modify ImageData to explicitly support scalar value arrays\n\n #------------------------------------------------------------------------\n # Data-related traits\n #------------------------------------------------------------------------\n\n #: The thickness(es) of the contour lines.\n #: It can be either a scalar value, valid for all contour lines, or a list\n #: of widths. If the list is too short with respect to then number of\n #: contour lines, the values are repeated from the beginning of the list.\n #: Widths are associated with levels of increasing value.\n widths = Trait(1.0, Float, List)\n\n #: The line dash style(s).\n styles = Trait(\"signed\", Str, List)\n\n #: Line style for positive levels.\n positive_style = LineStyle(\"solid\")\n\n #: Line style for negative levels.\n negative_style = LineStyle(\"dash\")\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n # Are the cached contours valid? 
If False, new ones need to be computed.\n _contour_cache_valid = Bool(False)\n\n # Cached collection of traces.\n _cached_contours = Dict\n\n # Is the cached width data valid?\n _widths_cache_valid = Bool(False)\n\n # Is the cached style data valid?\n _styles_cache_valid = Bool(False)\n\n # Cached list of line widths\n _widths = List\n\n # Cached list of line styles\n _styles = List\n\n # Mapped trait used to convert user-supplied line style values to\n # AGG-acceptable ones. (Mapped traits in lists are not supported, must be\n # converted one at a time.)\n _style_map_trait = LineStyle\n\n #------------------------------------------------------------------------\n # Private methods\n #------------------------------------------------------------------------\n\n def _render(self, gc):\n \"\"\" Actually draws the plot.\n\n Implements the Base2DPlot interface.\n \"\"\"\n if not self._level_cache_valid:\n self._update_levels()\n if not self._contour_cache_valid:\n self._update_contours()\n if not self._widths_cache_valid:\n self._update_widths()\n if not self._styles_cache_valid:\n self._update_styles()\n if not self._colors_cache_valid:\n self._update_colors()\n\n with gc:\n gc.set_antialias(True)\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n gc.set_alpha(self.alpha)\n gc.set_line_join(constants.JOIN_BEVEL)\n gc.set_line_cap(constants.CAP_ROUND)\n\n for i in range(len(self._levels)):\n gc.set_stroke_color(self._colors[i])\n gc.set_line_width(self._widths[i])\n gc.set_line_dash(self._styles[i])\n for trace in self._cached_contours[self._levels[i]]:\n if self.orientation == \"h\":\n strace = self.index_mapper.map_screen(trace)\n else:\n strace = array(\n self.index_mapper.map_screen(trace))[:, ::-1]\n gc.begin_path()\n gc.lines(strace)\n gc.stroke_path()\n\n def _update_contours(self):\n \"\"\" Updates the cache of contour lines \"\"\"\n if self.value.is_masked():\n # XXX masked data and get_data_mask not currently implemented\n data, mask = 
self.value.get_data_mask()\n mask &= isfinite(data)\n else:\n data = self.value.get_data()\n mask = isfinite(data)\n\n x_data, y_data = self.index.get_data()\n xs = x_data.get_data()\n ys = y_data.get_data()\n xg, yg = meshgrid(xs, ys)\n\n # note: contour wants mask True in invalid locations\n c = Cntr(xg, yg, data, ~mask)\n\n self._cached_contours = {}\n for level in self._levels:\n self._cached_contours[level] = []\n traces = c.trace(level)\n for trace in traces:\n self._cached_contours[level].append(transpose(trace))\n self._contour_cache_valid = True\n\n def _update_levels(self):\n \"\"\" Extends the parent method to also invalidate some other things \"\"\"\n super(ContourLinePlot, self)._update_levels()\n self._contour_cache_valid = False\n self._widths_cache_valid = False\n self._styles_cache_valid = False\n\n def _update_widths(self):\n \"\"\" Updates the widths cache.\n \"\"\"\n # If we are given a single width, apply it to all levels\n if isinstance(self.widths, float):\n self._widths = [self.widths] * len(self._levels)\n\n # If the list of widths is shorter than the list of levels,\n # simply repeat widths from the beginning of the list as needed\n else:\n self._widths = []\n for i in range(len(self._levels)):\n self._widths.append(self.widths[i % len(self.widths)])\n\n self._widths_cache_valid = True\n\n def _update_styles(self):\n \"\"\" Updates the styles cache.\n \"\"\"\n # If the style type is \"signed\" then assign styles to levels based\n # on their sign\n if self.styles == \"signed\":\n self._styles = []\n for level in self._levels:\n if level < 0:\n self._styles.append(self.negative_style_)\n else:\n self._styles.append(self.positive_style_)\n\n # If we not given a list, apply the one style to all levels\n elif not isinstance(self.styles, list):\n self._style_map_trait = self.styles\n self._styles = [self._style_map_trait_] * len(self._levels)\n\n # If the list of styles is shorter than the list of levels,\n # simply repeat styles from the 
beginning of the list as needed\n else:\n self._styles = []\n for i in range(len(self._levels)):\n self._style_map_trait = self.styles[i % len(self.styles)]\n self._styles.append(self._style_map_trait_)\n\n self._styles_cache_valid = True\n\n #------------------------------------------------------------------------\n # Event handlers\n #------------------------------------------------------------------------\n\n def _widths_changed(self):\n if self._level_cache_valid:\n self._update_widths()\n self.invalidate_draw()\n\n def _styles_changed(self):\n if self._level_cache_valid:\n self._update_styles()\n self.invalidate_draw()\n\n def _negative_style_changed(self):\n if self._level_cache_valid:\n self._update_styles()\n self.invalidate_draw()\n\n def _positive_style_changed(self):\n if self._level_cache_valid:\n self._update_styles()\n self.invalidate_draw()\n",
"\nfrom __future__ import with_statement\n\n# Major library imports\nfrom numpy import array, compress, concatenate, searchsorted\n\n# Enthought library imports\nfrom traits.api import Instance, Property\n\n# Chaco imports\nfrom .abstract_data_source import AbstractDataSource\nfrom .base_candle_plot import BaseCandlePlot\n\ndef broaden(mask):\n \"\"\" Takes a 1D boolean mask array and returns a copy with all the non-zero\n runs widened by 1.\n \"\"\"\n if len(mask) < 2:\n return mask\n # Note: the order in which these operations are performed is important.\n # Modifying newmask in-place with the |= operator only works for if\n # newmask[:-1] is the L-value.\n newmask = concatenate(([False], mask[1:] | mask[:-1]))\n newmask[:-1] |= mask[1:]\n return newmask\n\n\nclass CandlePlot(BaseCandlePlot):\n \"\"\" A plot consisting of a filled bar with an optional centerline and\n stems extending to extrema. Usually used to represent some statistics\n on bins of data, with the centerline representing the mean, the bar\n extents representing +/- 1 standard dev or 10th/90th percentiles, and\n the stems extents representing the minimum and maximum samples.\n\n The values in the **index** datasource indicate the centers of the bins;\n the widths of the bins are *not* specified in data space, and are\n determined by the minimum space between adjacent index values.\n \"\"\"\n\n #------------------------------------------------------------------------\n # Data-related traits\n #------------------------------------------------------------------------\n\n #: The minimum values at each index point. If None, then no stem and no\n #: endcap line will be drawn below each bar.\n min_values = Instance(AbstractDataSource)\n\n #: The \"lower\" extent of the \"bar\", i.e. the value closest to the\n #: corresponding value in min_values at each index.\n bar_min = Instance(AbstractDataSource)\n\n #: Values that appear inside the bar, between bar_min and bar_max. 
These\n #: Are usually mean or median values, and are rendered with a solid line\n #: of a different color than the bar fill color. This can be None.\n center_values = Instance(AbstractDataSource)\n\n #: The \"upper\" extent of the \"bar\", i.e. the value closest to the\n #: corresponding value in max_values at each index.\n bar_max = Instance(AbstractDataSource)\n\n #: The maximum value at each index point. If None, then no stem and no\n #: endcap line will be drawn above each bar.\n max_values = Instance(AbstractDataSource)\n\n value = Property\n\n def map_data(self, screen_pt, all_values=True):\n \"\"\" Maps a screen space point into the \"index\" space of the plot.\n\n Overrides the BaseXYPlot implementation, and always returns an\n array of (index, value) tuples.\n \"\"\"\n x, y = screen_pt\n if self.orientation == 'v':\n x, y = y, x\n return array((self.index_mapper.map_data(x),\n self.value_mapper.map_data(y)))\n\n def map_index(self, screen_pt, threshold=0.0, outside_returns_none=True,\n index_only = True):\n if not index_only:\n raise NotImplementedError(\"Candle Plots only support index_only map_index()\")\n if len(screen_pt) == 0:\n return None\n\n # Find the closest index point using numpy\n index_data = self.index.get_data()\n if len(index_data) == 0:\n return None\n\n target_data = self.index_mapper.map_data(screen_pt[0])\n\n index = searchsorted(index_data, [target_data])[0]\n if index == len(index_data):\n index -= 1\n # Bracket index and map those points to screen space, then\n # compute the distance\n if index > 0:\n lower = index_data[index-1]\n upper = index_data[index]\n screen_low, screen_high = self.index_mapper.map_screen(array([lower, upper]))\n # Find the closest index\n low_dist = abs(screen_pt[0] - screen_low)\n high_dist = abs(screen_pt[0] - screen_high)\n if low_dist < high_dist:\n index = index - 1\n dist = low_dist\n else:\n dist = high_dist\n # Determine if we need to check the threshold\n if threshold > 0 and dist >= threshold:\n 
return None\n else:\n return index\n else:\n screen = self.index_mapper.map_screen(index_data[0])\n if threshold > 0 and abs(screen - screen_pt[0]) >= threshold:\n return None\n else:\n return index\n\n def _gather_points(self):\n index = self.index.get_data()\n mask = broaden(self.index_range.mask_data(index))\n\n if not mask.any():\n self._cached_data_pts = []\n self._cache_valid = True\n return\n\n data_pts = [compress(mask, index)]\n\n for v in (self.min_values, self.bar_min, self.center_values, self.bar_max, self.max_values):\n if v is None or len(v.get_data()) == 0:\n data_pts.append(None)\n else:\n data_pts.append(compress(mask, v.get_data()))\n\n self._cached_data_pts = data_pts\n self._cache_valid = True\n\n def _draw_plot(self, gc, view_bounds=None, mode=\"normal\"):\n self._gather_points()\n if len(self._cached_data_pts) == 0:\n return\n\n index = self.index_mapper.map_screen(self._cached_data_pts[0])\n if len(index) == 0:\n return\n\n vals = []\n for v in self._cached_data_pts[1:]:\n if v is None:\n vals.append(None)\n else:\n vals.append(self.value_mapper.map_screen(v))\n\n # Compute lefts and rights from self.index, which represents bin\n # centers.\n if len(index) == 1:\n width = 5.0\n else:\n width = (index[1:] - index[:-1]).min() / 2.5\n left = index - width\n right = index + width\n\n with gc:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n self._render(gc, left, right, *vals)\n\n def _get_value(self):\n if self.center_values is not None:\n return self.center_values\n elif self.bar_min is not None:\n return self.bar_min\n elif self.bar_max is not None:\n return self.bar_max\n\n\n"
] | [
[
"numpy.array"
],
[
"numpy.meshgrid",
"numpy.isfinite",
"numpy.transpose"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.compress",
"numpy.searchsorted"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dewyman/TALON-paper-2019 | [
"8644b34573d6a5924e8d84a234fd0fcbf010c233"
] | [
"pipeline/table_figure_scripts/rename_abundance_file_datasets.py"
] | [
"import pandas as pd \nfrom collections import defaultdict\nimport argparse\n\nparser = argparse.ArgumentParser(description=\\\n\t'Renames PacBio and ONT datasets with more\\\n\t intelligent names')\nparser.add_argument('--f', help='file to swap dataset col names in')\nargs = parser.parse_args()\nf = args.f\n\n# read in mapping file\nmap_df = pd.read_csv('dataset_id_name_map.tsv', sep='\\t')\nmap_df.set_index('dataset_id', inplace=True)\nmap_dict = map_df.to_dict()\n\ndf = pd.read_csv(f, sep='\\t')\ndf.rename(columns=map_dict['dataset_name'], inplace=True)\ndf.to_csv(f, sep='\\t', index=False)\n\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gmwang18/pyscf | [
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac",
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac",
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac",
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac",
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac",
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac",
"fcd6877751661c8a9743c1c872a4a2b65f6dd7ac"
] | [
"scf/_vhf.py",
"fci/cistring.py",
"tools/cubegen.py",
"pbc/df/test/test_aft.py",
"symm/addons.py",
"pbc/scf/x2c.py",
"scf/test/test_rhf.py"
] | [
"#!/usr/bin/env python\n\nimport sys\nimport ctypes\nimport _ctypes\nimport numpy\nimport pyscf.lib\nfrom pyscf import gto\nfrom pyscf.gto.moleintor import make_cintopt\n\nlibcvhf = pyscf.lib.load_library('libcvhf')\ndef _fpointer(name):\n return ctypes.c_void_p(_ctypes.dlsym(libcvhf._handle, name))\n\nclass VHFOpt(object):\n def __init__(self, mol, intor,\n prescreen='CVHFnoscreen', qcondname=None, dmcondname=None):\n self._this = ctypes.POINTER(_CVHFOpt)()\n #print self._this.contents, expect ValueError: NULL pointer access\n self._intor = _fpointer(intor)\n self._cintopt = pyscf.lib.c_null_ptr()\n self._dmcondname = dmcondname\n self.init_cvhf_direct(mol, intor, prescreen, qcondname)\n\n def init_cvhf_direct(self, mol, intor, prescreen, qcondname):\n c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n self._cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n\n# libcvhf.CVHFnr_optimizer(ctypes.byref(self._this),\n# c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n# c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n# c_env.ctypes.data_as(ctypes.c_void_p))\n libcvhf.CVHFinit_optimizer(ctypes.byref(self._this),\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n self._this.contents.fprescreen = _fpointer(prescreen)\n\n if prescreen != 'CVHFnoscreen':\n fsetqcond = getattr(libcvhf, qcondname)\n fsetqcond(self._this,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n @property\n def direct_scf_tol(self):\n return self._this.contents.direct_scf_cutoff\n @direct_scf_tol.setter\n def direct_scf_tol(self, v):\n self._this.contents.direct_scf_cutoff = v\n\n def set_dm(self, dm, atm, bas, env):\n 
if self._dmcondname is not None:\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n if isinstance(dm, numpy.ndarray) and dm.ndim == 2:\n n_dm = 1\n else:\n n_dm = len(dm)\n dm = numpy.asarray(dm, order='C')\n fsetdm = getattr(libcvhf, self._dmcondname)\n fsetdm(self._this,\n dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\nclass _CVHFOpt(ctypes.Structure):\n _fields_ = [('nbas', ctypes.c_int),\n ('_padding', ctypes.c_int),\n ('direct_scf_cutoff', ctypes.c_double),\n ('q_cond', ctypes.c_void_p),\n ('dm_cond', ctypes.c_void_p),\n ('fprescreen', ctypes.c_void_p),\n ('r_vkscreen', ctypes.c_void_p)]\n\n################################################\n# for general DM\n# hermi = 0 : arbitary\n# hermi = 1 : hermitian\n# hermi = 2 : anti-hermitian\n################################################\ndef incore(eri, dm, hermi=0):\n assert(not numpy.iscomplexobj(eri))\n eri = numpy.ascontiguousarray(eri)\n dm = numpy.ascontiguousarray(dm)\n nao = dm.shape[0]\n vj = numpy.empty((nao,nao))\n vk = numpy.empty((nao,nao))\n npair = nao*(nao+1)//2\n if eri.ndim == 2 and npair*npair == eri.size: # 4-fold symmetry eri\n fdrv = getattr(libcvhf, 'CVHFnrs4_incore_drv')\n # 'ijkl,kl->ij'\n fvj = _fpointer('CVHFics4_kl_s2ij')\n # 'ijkl,il->jk'\n fvk = _fpointer('CVHFics4_il_s1jk')\n # or\n ## 'ijkl,ij->kl'\n #fvj = _fpointer('CVHFics4_ij_s2kl')\n ## 'ijkl,jk->il'\n #fvk = _fpointer('CVHFics4_jk_s1il')\n\n tridm = dm\n elif eri.ndim == 1 and npair*(npair+1)//2 == eri.size: # 8-fold symmetry eri\n fdrv = getattr(libcvhf, 'CVHFnrs8_incore_drv')\n fvj = _fpointer('CVHFics8_tridm_vj')\n if hermi == 1:\n fvk = _fpointer('CVHFics8_jk_s2il')\n else:\n 
fvk = _fpointer('CVHFics8_jk_s1il')\n tridm = pyscf.lib.pack_tril(pyscf.lib.transpose_sum(dm))\n i = numpy.arange(nao)\n tridm[i*(i+1)//2+i] *= .5\n else:\n raise RuntimeError('Array shape not consistent: DM %s, eri %s'\n % (dm.shape, eri.shape))\n fdrv(eri.ctypes.data_as(ctypes.c_void_p),\n tridm.ctypes.data_as(ctypes.c_void_p),\n vj.ctypes.data_as(ctypes.c_void_p),\n dm.ctypes.data_as(ctypes.c_void_p),\n vk.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(nao), fvj, fvk)\n if hermi != 0:\n vj = pyscf.lib.hermi_triu(vj, hermi)\n vk = pyscf.lib.hermi_triu(vk, hermi)\n else:\n vj = pyscf.lib.hermi_triu(vj, 1)\n return vj, vk\n\n# use cint2e_sph as cintor, CVHFnrs8_ij_s2kl, CVHFnrs8_jk_s2il as fjk to call\n# direct_mapdm\ndef direct(dms, atm, bas, env, vhfopt=None, hermi=0):\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C'),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = numpy.asarray(dms, order='C')\n\n if vhfopt is None:\n cintor = _fpointer('cint2e_sph')\n cintopt = make_cintopt(c_atm, c_bas, c_env, 'cint2e_sph')\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')\n fdot = _fpointer('CVHFdot_nrs8')\n fvj = _fpointer('CVHFnrs8_ji_s2kl')\n if hermi == 1:\n fvk = _fpointer('CVHFnrs8_li_s2kj')\n else:\n fvk = _fpointer('CVHFnrs8_li_s1kj')\n vjk = numpy.empty((2,n_dm,nao,nao))\n fjk = (ctypes.c_void_p*(2*n_dm))()\n dmsptr = (ctypes.c_void_p*(2*n_dm))()\n vjkptr = (ctypes.c_void_p*(2*n_dm))()\n for i in range(n_dm):\n dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n vjkptr[i] = 
vjk[0,i].ctypes.data_as(ctypes.c_void_p)\n fjk[i] = fvj\n for i in range(n_dm):\n dmsptr[n_dm+i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n vjkptr[n_dm+i] = vjk[1,i].ctypes.data_as(ctypes.c_void_p)\n fjk[n_dm+i] = fvk\n shls_slice = (ctypes.c_int*8)(*([0, c_bas.shape[0]]*4))\n ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)\n\n fdrv(cintor, fdot, fjk, dmsptr, vjkptr,\n ctypes.c_int(n_dm*2), ctypes.c_int(1),\n shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n # vj must be symmetric\n for idm in range(n_dm):\n vjk[0,idm] = pyscf.lib.hermi_triu(vjk[0,idm], 1)\n if hermi != 0: # vk depends\n for idm in range(n_dm):\n vjk[1,idm] = pyscf.lib.hermi_triu(vjk[1,idm], hermi)\n if n_dm == 1:\n vjk = vjk.reshape(2,nao,nao)\n return vjk\n\n# call all fjk for each dm, the return array has len(dms)*len(jkdescript)*ncomp components\n# jkdescript: 'ij->s1kl', 'kl->s2ij', ...\ndef direct_mapdm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C'),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = [numpy.asarray(dm, order='C') for dm in dms]\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = len(jkdescript)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, 
bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_nr'+dotsym)\n\n if shls_slice is None:\n shls_slice = (0, c_bas.shape[0])*4\n ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)\n\n vjk = []\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(njk*n_dm))()\n dmsptr = (ctypes.c_void_p*(njk*n_dm))()\n vjkptr = (ctypes.c_void_p*(njk*n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n if dmsym in ('ij', 'kl', 'il', 'kj'):\n sys.stderr.write('not support DM description %s, transpose to %s\\n' %\n (dmsym, dmsym[::-1]))\n dmsym = dmsym[::-1]\n f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))\n\n vshape = (n_dm,ncomp) + get_dims(vsym[-2:], shls_slice, ao_loc)\n vjk.append(numpy.empty(vshape))\n for j in range(n_dm):\n assert(dms[j].shape == get_dims(dmsym, shls_slice, ao_loc))\n dmsptr[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)\n vjkptr[i*n_dm+j] = vjk[i][j].ctypes.data_as(ctypes.c_void_p)\n fjk[i*n_dm+j] = f1\n shls_slice = (ctypes.c_int*8)(*shls_slice)\n\n fdrv(cintor, fdot, fjk, dmsptr, vjkptr,\n ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),\n shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if n_dm * ncomp == 1:\n vjk = [v.reshape(v.shape[2:]) for v in vjk]\n elif n_dm == 1:\n vjk = [v.reshape((ncomp,)+v.shape[2:]) for v in vjk]\n elif ncomp == 1:\n vjk = [v.reshape((n_dm,)+v.shape[2:]) for v in vjk]\n if njk == 1:\n vjk = vjk[0]\n return vjk\n\n# for density matrices in dms, bind each dm to a jk operator\n# jkdescript: 'ij->s1kl', 'kl->s2ij', ...\ndef direct_bindm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 
'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C'),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = [numpy.asarray(dm, order='C') for dm in dms]\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = len(jkdescript)\n assert(njk == n_dm)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_nr'+dotsym)\n\n if shls_slice is None:\n shls_slice = (0, c_bas.shape[0])*4\n ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)\n\n vjk = []\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(n_dm))()\n dmsptr = (ctypes.c_void_p*(n_dm))()\n vjkptr = (ctypes.c_void_p*(n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n if dmsym in ('ij', 'kl', 'il', 'kj'):\n sys.stderr.write('not support DM description %s, transpose to %s\\n' %\n (dmsym, dmsym[::-1]))\n dmsym = dmsym[::-1]\n f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))\n\n assert(dms[i].shape == get_dims(dmsym, shls_slice, ao_loc))\n vshape = (ncomp,) + get_dims(vsym[-2:], shls_slice, ao_loc)\n vjk.append(numpy.empty(vshape))\n dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n vjkptr[i] = vjk[i].ctypes.data_as(ctypes.c_void_p)\n fjk[i] = f1\n shls_slice = (ctypes.c_int*8)(*shls_slice)\n\n fdrv(cintor, fdot, fjk, dmsptr, vjkptr,\n ctypes.c_int(n_dm), ctypes.c_int(ncomp),\n shls_slice, 
ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if ncomp == 1:\n vjk = [v.reshape(v.shape[1:]) for v in vjk]\n if njk == 1:\n vjk = vjk[0]\n return vjk\n\n\n# 8-fold permutation symmetry\ndef int2e_sph(atm, bas, env):\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n libcvhf.CINTtot_cgto_spheric.restype = ctypes.c_int\n nao = libcvhf.CINTtot_cgto_spheric(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)\n nao_pair = nao*(nao+1)//2\n eri = numpy.empty((nao_pair*(nao_pair+1)//2))\n libcvhf.int2e_sph(eri.ctypes.data_as(ctypes.c_void_p),\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n return eri\n\n\n################################################################\n# relativistic\ndef rdirect_mapdm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = len(jkdescript)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = 
make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_r'+dotsym)\n\n unpackas = _INTUNPACKMAP_R[aosym]\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(njk*n_dm))()\n dm1 = (ctypes.c_void_p*(njk*n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))\n for j in range(n_dm):\n dm1[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)\n fjk[i*n_dm+j] = f1\n vjk = numpy.empty((njk,n_dm*ncomp,nao,nao), dtype=numpy.complex)\n\n fdrv(cintor, fdot, fjk, dm1,\n vjk.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),\n cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if n_dm * ncomp == 1:\n vjk = vjk.reshape(njk,nao,nao)\n if njk == 1:\n vjk = vjk.reshape(vjk.shape[1:])\n return vjk\n\n# for density matrices in dms, bind each dm to a jk operator\ndef rdirect_bindm(intor, aosym, jkdescript,\n dms, ncomp, atm, bas, env, vhfopt=None):\n assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',\n 'a4ij', 'a4kl', 'a2ij', 'a2kl'))\n c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n c_env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(c_atm.shape[0])\n nbas = ctypes.c_int(c_bas.shape[0])\n\n if isinstance(dms, numpy.ndarray) and dms.ndim == 2:\n n_dm = 1\n nao = dms.shape[0]\n dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)\n else:\n n_dm = len(dms)\n nao = dms[0].shape[0]\n dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)\n if isinstance(jkdescript, str):\n njk = 1\n jkdescript = (jkdescript,)\n else:\n njk = 
len(jkdescript)\n assert(njk == n_dm)\n\n if vhfopt is None:\n cintor = _fpointer(intor)\n cintopt = make_cintopt(c_atm, c_bas, c_env, intor)\n cvhfopt = pyscf.lib.c_null_ptr()\n else:\n vhfopt.set_dm(dms, atm, bas, env)\n cvhfopt = vhfopt._this\n cintopt = vhfopt._cintopt\n cintor = vhfopt._intor\n\n fdrv = getattr(libcvhf, 'CVHFr_direct_drv')\n dotsym = _INTSYMAP[aosym]\n fdot = _fpointer('CVHFdot_r'+dotsym)\n\n unpackas = _INTUNPACKMAP_R[aosym]\n descr_sym = [x.split('->') for x in jkdescript]\n fjk = (ctypes.c_void_p*(n_dm))()\n dm1 = (ctypes.c_void_p*(n_dm))()\n for i, (dmsym, vsym) in enumerate(descr_sym):\n f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))\n dm1[i] = dms[i].ctypes.data_as(ctypes.c_void_p)\n fjk[i] = f1\n vjk = numpy.empty((njk,ncomp,nao,nao), dtype=numpy.complex)\n\n fdrv(cintor, fdot, fjk, dm1,\n vjk.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(n_dm), ctypes.c_int(ncomp),\n cintopt, cvhfopt,\n c_atm.ctypes.data_as(ctypes.c_void_p), natm,\n c_bas.ctypes.data_as(ctypes.c_void_p), nbas,\n c_env.ctypes.data_as(ctypes.c_void_p))\n\n if ncomp == 1:\n vjk = vjk.reshape(njk,nao,nao)\n if njk == 1:\n vjk = vjk.reshape(vjk.shape[1:])\n return vjk\n\n# 'a4ij': anti-symm between ij, symm between kl\n# 'a4kl': anti-symm between kl, symm between ij\n# 'a2ij': anti-symm between ij,\n# 'a2kl': anti-symm between kl,\n_INTSYMAP= {\n 's8' : 's8' ,\n 's4' : 's4' ,\n 's2ij': 's2ij',\n 's2kl': 's2kl',\n 's1' : 's1' ,\n 'a4ij': 's4' ,\n 'a4kl': 's4' ,\n 'a2ij': 's2ij',\n 'a2kl': 's2kl',\n}\n\n_INTUNPACKMAP_R = {\n 's8' : 's8' ,\n 's4' : 's4' ,\n 's2ij': 's2ij',\n 's2kl': 's2kl',\n 's1' : 's1' ,\n 'a4ij': 'ah4' ,\n 'a4kl': 'ha4' ,\n 'a2ij': 'ah2ij',\n 'a2kl': 'ha2kl',\n}\n\ndef make_ao_loc(bas, cart=False):\n l = bas[:,gto.ANG_OF]\n if cart:\n dims = (l+1)*(l+2)//2 * bas[:,gto.NCTR_OF]\n else:\n dims = (l*2+1) * bas[:,gto.NCTR_OF]\n ao_loc = numpy.empty(len(bas)+1, dtype=numpy.int32)\n ao_loc[0] = 0\n dims.cumsum(dtype=numpy.int32, out=ao_loc[1:])\n 
return ao_loc\n\n_SHLINDEX = {'i': 0, 'j': 2, 'k': 4, 'l': 6}\ndef get_dims(descr_sym, shls_slice, ao_loc):\n i = _SHLINDEX[descr_sym[0]]\n j = _SHLINDEX[descr_sym[1]]\n di = ao_loc[shls_slice[i+1]] - ao_loc[shls_slice[i]]\n dj = ao_loc[shls_slice[j+1]] - ao_loc[shls_slice[j]]\n return (di,dj)\n\n",
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport sys\nimport ctypes\nimport math\nimport numpy\nfrom pyscf import lib\n\nlibfci = lib.load_library('libfci')\n\ndef gen_strings4orblist(orb_list, nelec):\n '''Generate string from the given orbital list.\n\n Returns:\n list of int64. One int64 element represents one string in binary format.\n The binary format takes the convention that the one bit stands for one\n orbital, bit-1 means occupied and bit-0 means unoccupied. The lowest\n (right-most) bit corresponds to the lowest orbital in the orb_list.\n\n Exampels:\n\n >>> [bin(x) for x in gen_strings4orblist((0,1,2,3),2)]\n [0b11, 0b101, 0b110, 0b1001, 0b1010, 0b1100]\n >>> [bin(x) for x in gen_strings4orblist((3,1,0,2),2)]\n [0b1010, 0b1001, 0b11, 0b1100, 0b110, 0b101]\n '''\n assert(nelec >= 0)\n if nelec == 0:\n return [0]\n elif nelec > len(orb_list):\n return []\n def gen_str_iter(orb_list, nelec):\n if nelec == 1:\n res = [(1<<i) for i in orb_list]\n res.reverse()\n elif nelec >= len(orb_list):\n n = 0\n for i in orb_list:\n n = n | (1<<i)\n res = [n]\n else:\n restorb = orb_list[1:]\n res = gen_str_iter(restorb, nelec)\n for n in gen_str_iter(restorb, nelec-1):\n res.append(n | (1<<orb_list[0]))\n return res\n strings = gen_str_iter(orb_list[::-1], nelec)\n assert(strings.__len__() == num_strings(len(orb_list),nelec))\n return numpy.asarray(strings, dtype=numpy.int64)\n\ndef num_strings(n, m):\n if m < 0 or m > n:\n return 0\n else:\n return math.factorial(n) // (math.factorial(n-m)*math.factorial(m))\n\ndef gen_linkstr_index_o0(orb_list, nelec, strs=None):\n if strs is None:\n strs = gen_strings4orblist(orb_list, nelec)\n strdic = dict(zip(strs,range(strs.__len__())))\n def propgate1e(str0):\n occ = []\n vir = []\n for i in orb_list:\n if str0 & (1<<i):\n occ.append(i)\n else:\n vir.append(i)\n linktab = []\n for i in occ:\n linktab.append((i, i, strdic[str0], 1))\n for i in occ:\n for a in vir:\n str1 = str0 ^ (1<<i) | (1<<a)\n 
# [cre, des, target_address, parity]\n linktab.append((a, i, strdic[str1], cre_des_sign(a, i, str0)))\n return linktab\n\n t = [propgate1e(s) for s in strs]\n return numpy.array(t, dtype=numpy.int32)\n\n# return [cre, des, target_address, parity]\ndef gen_linkstr_index(orb_list, nocc, strs=None, tril=False):\n '''Look up table, for the strings relationship in terms of a\n creation-annihilating operator pair.\n\n For given string str0, index[str0] is (nocc+nocc*nvir) x 4 array.\n The first nocc rows [i(:occ),i(:occ),str0,sign] are occupied-occupied\n excitations, which do not change the string. The next nocc*nvir rows\n [a(:vir),i(:occ),str1,sign] are occupied-virtual exciations, starting from\n str0, annihilating i, creating a, to get str1.\n '''\n if strs is None:\n strs = gen_strings4orblist(orb_list, nocc)\n strs = numpy.array(strs, dtype=numpy.uint64)\n assert(all(strs[:-1] < strs[1:]))\n norb = len(orb_list)\n nvir = norb - nocc\n na = strs.shape[0]\n link_index = numpy.empty((na,nocc*nvir+nocc,4), dtype=numpy.int32)\n libfci.FCIlinkstr_index(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(na),\n ctypes.c_int(nocc),\n strs.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(tril))\n return link_index\n\ndef reform_linkstr_index(link_index):\n '''Compress the (a, i) pair index in linkstr_index to a lower triangular\n index, to match the 4-fold symmetry of integrals.\n '''\n #for k, tab in enumerate(link_index):\n # for j, (a, i, str1, sign) in enumerate(tab):\n # if a > i:\n # ai = a*(a+1)//2+i\n # else:\n # ai = i*(i+1)//2+a\n # link_new[k,j] = (ai,0,str1,sign)\n link_new = link_index.copy()\n a = link_index[:,:,0]\n i = link_index[:,:,1]\n ai = a*(a+1)//2 + i\n ia = i*(i+1)//2 + a\n link_new[:,:,0][a>i ] = ai[a>i ]\n link_new[:,:,0][a<=i] = ia[a<=i]\n link_new[:,:,1] = 0\n return link_new\n\ndef gen_linkstr_index_trilidx(orb_list, nocc, strs=None):\n r'''Generate linkstr_index with the assumption that :math:`p^+ q|0\\rangle`\n where 
:math:`p > q`.\n So the resultant link_index has the structure ``[pq, *, str1, sign]``.\n It is identical to a call to ``reform_linkstr_index(gen_linkstr_index(...))``.\n '''\n return gen_linkstr_index(orb_list, nocc, strs, True)\n\n# return [cre, des, target_address, parity]\ndef gen_cre_str_index_o0(orb_list, nelec):\n cre_strs = gen_strings4orblist(orb_list, nelec+1)\n credic = dict(zip(cre_strs,range(cre_strs.__len__())))\n def progate1e(str0):\n linktab = []\n for i in orb_list:\n if not str0 & (1<<i):\n str1 = str0 | (1<<i)\n linktab.append((i, 0, credic[str1], cre_sign(i, str0)))\n return linktab\n\n t = [progate1e(s) for s in gen_strings4orblist(orb_list, nelec)]\n return numpy.array(t, dtype=numpy.int32)\ndef gen_cre_str_index_o1(orb_list, nelec):\n norb = len(orb_list)\n assert(nelec < norb)\n strs = gen_strings4orblist(orb_list, nelec)\n strs = numpy.array(strs, dtype=numpy.int64)\n na = strs.shape[0]\n link_index = numpy.empty((len(strs),norb-nelec,4), dtype=numpy.int32)\n libfci.FCIcre_str_index(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(na),\n ctypes.c_int(nelec),\n strs.ctypes.data_as(ctypes.c_void_p))\n return link_index\ndef gen_cre_str_index(orb_list, nelec):\n '''linkstr_index to map between N electron string to N+1 electron string.\n It maps the given string to the address of the string which is generated by\n the creation operator.\n\n For given string str0, index[str0] is nvir x 4 array. 
Each entry\n [i(cre),--,str1,sign] means starting from str0, creating i, to get str1.\n '''\n return gen_cre_str_index_o1(orb_list, nelec)\n\n# return [cre, des, target_address, parity]\ndef gen_des_str_index_o0(orb_list, nelec):\n des_strs = gen_strings4orblist(orb_list, nelec-1)\n desdic = dict(zip(des_strs,range(des_strs.__len__())))\n def progate1e(str0):\n linktab = []\n for i in orb_list:\n if str0 & (1<<i):\n str1 = str0 ^ (1<<i)\n linktab.append((0, i, desdic[str1], des_sign(i, str0)))\n return linktab\n\n t = [progate1e(s) for s in gen_strings4orblist(orb_list, nelec)]\n return numpy.array(t, dtype=numpy.int32)\ndef gen_des_str_index_o1(orb_list, nelec):\n assert(nelec > 0)\n strs = gen_strings4orblist(orb_list, nelec)\n strs = numpy.array(strs, dtype=numpy.int64)\n norb = len(orb_list)\n na = strs.shape[0]\n link_index = numpy.empty((len(strs),nelec,4), dtype=numpy.int32)\n libfci.FCIdes_str_index(link_index.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(norb), ctypes.c_int(na),\n ctypes.c_int(nelec),\n strs.ctypes.data_as(ctypes.c_void_p))\n return link_index\ndef gen_des_str_index(orb_list, nelec):\n '''linkstr_index to map between N electron string to N-1 electron string.\n It maps the given string to the address of the string which is generated by\n the annihilation operator.\n\n For given string str0, index[str0] is nvir x 4 array. 
Each entry\n [--,i(des),str1,sign] means starting from str0, annihilating i, to get str1.\n '''\n return gen_des_str_index_o1(orb_list, nelec)\n\n\n\n# Determine the sign of p^+ q |string0>\ndef cre_des_sign(p, q, string0):\n if p == q:\n return 1\n else:\n if (string0 & (1<<p)) or (not (string0 & (1<<q))):\n return 0\n elif p > q:\n mask = (1 << p) - (1 << (q+1))\n else:\n mask = (1 << q) - (1 << (p+1))\n return (-1) ** bin(string0 & mask).count('1')\n\n# Determine the sign of p^+ |string0>\ndef cre_sign(p, string0):\n if (string0 & (1<<p)):\n return 0\n else:\n return (-1) ** bin(string0>>(p+1)).count('1')\n\n# Determine the sign of p |string0>\ndef des_sign(p, string0):\n if (not (string0 & (1<<p))):\n return 0\n else:\n return (-1) ** bin(string0>>(p+1)).count('1')\n\n# Determine the sign of string1 = p^+ q |string0>\ndef parity(string0, string1):\n sys.stderr.write('Function cistring.parity is deprecated\\n')\n ss = string1 - string0\n def count_bit1(n):\n # see Hamming weight problem and K&R C program\n return bin(n).count('1')\n if ss > 0:\n # string1&ss gives the number of 1s between two strings\n return (-1) ** (count_bit1(string1&ss))\n elif ss == 0:\n return 1\n else:\n return (-1) ** (count_bit1(string0&(-ss)))\n\ndef addr2str_o0(norb, nelec, addr):\n assert(num_strings(norb, nelec) > addr)\n if addr == 0 or nelec == norb or nelec == 0:\n return (1<<nelec) - 1 # ..0011..11\n else:\n for i in reversed(range(norb)):\n addrcum = num_strings(i, nelec)\n if addrcum <= addr:\n return (1<<i) | addr2str_o0(i, nelec-1, addr-addrcum)\ndef addr2str_o1(norb, nelec, addr):\n assert(num_strings(norb, nelec) > addr)\n if addr == 0 or nelec == norb or nelec == 0:\n return (1<<nelec) - 1 # ..0011..11\n str1 = 0\n nelec_left = nelec\n for norb_left in reversed(range(norb)):\n addrcum = num_strings(norb_left, nelec_left)\n if nelec_left == 0:\n break\n elif addr == 0:\n str1 |= (1<<nelec_left) - 1\n break\n elif addrcum <= addr:\n str1 |= 1<<norb_left\n addr -= addrcum\n 
nelec_left -= 1\n return str1\ndef addr2str(norb, nelec, addr):\n '''Convert CI determinant address to string'''\n return addr2str_o1(norb, nelec, addr)\n\n#def str2addr_o0(norb, nelec, string):\n# if norb <= nelec or nelec == 0:\n# return 0\n# elif (1<<(norb-1)) & string: # remove the first bit\n# return num_strings(norb-1, nelec) \\\n# + str2addr_o0(norb-1, nelec-1, string^(1<<(norb-1)))\n# else:\n# return str2addr_o0(norb-1, nelec, string)\n#def str2addr_o1(norb, nelec, string):\n# #TODO: assert norb > first-bit-in-string, nelec == num-1-in-string\n# addr = 0\n# nelec_left = nelec\n# for norb_left in reversed(range(norb)):\n# if nelec_left == 0 or norb_left < nelec_left:\n# break\n# elif (1<<norb_left) & string:\n# addr += num_strings(norb_left, nelec_left)\n# nelec_left -= 1\n# return addr\ndef str2addr(norb, nelec, string):\n '''Convert the string to the CI determinant address'''\n if isinstance(string, str):\n assert(string.count('1') == nelec)\n string = int(string, 2)\n else:\n assert(bin(string).count('1') == nelec)\n libfci.FCIstr2addr.restype = ctypes.c_int\n return libfci.FCIstr2addr(ctypes.c_int(norb), ctypes.c_int(nelec),\n ctypes.c_ulonglong(string))\n\ndef tn_strs(norb, nelec, n):\n '''Generate strings for Tn amplitudes. 
Eg n=1 (T1) has nvir*nocc strings,\n n=2 (T2) has nvir*(nvir-1)/2 * nocc*(nocc-1)/2 strings.\n '''\n if nelec < n or norb-nelec < n:\n return numpy.zeros(0, dtype=int)\n occs_allow = numpy.asarray(gen_strings4orblist(range(nelec), n)[::-1])\n virs_allow = numpy.asarray(gen_strings4orblist(range(nelec,norb), n))\n hf_str = int('1'*nelec, 2)\n tns = (hf_str | virs_allow.reshape(-1,1)) ^ occs_allow\n return tns.ravel()\n\nif __name__ == '__main__':\n #print([bin(i) for i in gen_strings4orblist(range(2,5), 2)])\n #print(gen_strings4orblist(range(4), 2))\n #print(gen_linkstr_index(range(6), 3))\n# index = gen_linkstr_index(range(8), 4)\n# idx16 = index[:16]\n# print(idx16[:,:,2])\n tab1 = gen_linkstr_index_o0(range(8), 4)\n tab2 = gen_linkstr_index(range(8), 4)\n print(abs(tab1 - tab2).sum())\n\n print(addr2str_o0(6, 3, 7) - addr2str(6, 3, 7))\n print(addr2str_o0(6, 3, 8) - addr2str(6, 3, 8))\n print(addr2str_o0(7, 4, 9) - addr2str(7, 4, 9))\n\n print(str2addr(6, 3, addr2str(6, 3, 7)) - 7)\n print(str2addr(6, 3, addr2str(6, 3, 8)) - 8)\n print(str2addr(7, 4, addr2str(7, 4, 9)) - 9)\n\n tab1 = gen_cre_str_index_o0(range(8), 4)\n tab2 = gen_cre_str_index_o1(range(8), 4)\n print(abs(tab1 - tab2).sum())\n tab1 = gen_des_str_index_o0(range(8), 4)\n tab2 = gen_des_str_index_o1(range(8), 4)\n print(abs(tab1 - tab2).sum())\n",
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport numpy\nfrom pyscf import lib\nfrom pyscf.dft import numint, gen_grid\n\n'''\nGaussian cube file format\n'''\n\ndef density(mol, outfile, dm, nx=80, ny=80, nz=80):\n coord = mol.atom_coords()\n box = numpy.max(coord,axis=0) - numpy.min(coord,axis=0) + 4\n boxorig = numpy.min(coord,axis=0) - 2\n xs = numpy.arange(nx) * (box[0]/nx)\n ys = numpy.arange(ny) * (box[1]/ny)\n zs = numpy.arange(nz) * (box[2]/nz)\n coords = lib.cartesian_prod([xs,ys,zs])\n coords = numpy.asarray(coords, order='C') - (-boxorig)\n\n nao = mol.nao_nr()\n ngrids = nx * ny * nz\n blksize = min(200, ngrids)\n rho = numpy.empty(ngrids)\n for ip0, ip1 in gen_grid.prange(0, ngrids, blksize):\n ao = numint.eval_ao(mol, coords[ip0:ip1])\n rho[ip0:ip1] = numint.eval_rho(mol, ao, dm)\n rho = rho.reshape(nx,ny,nz)\n\n with open(outfile, 'w') as f:\n f.write('Density in real space\\n')\n f.write('Comment line\\n')\n f.write('%5d' % mol.natm)\n f.write(' %14.8f %14.8f %14.8f\\n' % tuple(boxorig.tolist()))\n f.write('%5d %14.8f %14.8f %14.8f\\n' % (nx, xs[1], 0, 0))\n f.write('%5d %14.8f %14.8f %14.8f\\n' % (ny, 0, ys[1], 0))\n f.write('%5d %14.8f %14.8f %14.8f\\n' % (nz, 0, 0, zs[1]))\n for ia in range(mol.natm):\n chg = mol.atom_charge(ia)\n f.write('%5d %f' % (chg, chg))\n f.write(' %14.8f %14.8f %14.8f\\n' % tuple(coord[ia]))\n fmt = ' %14.8e' * nz + '\\n'\n for ix in range(nx):\n for iy in range(ny):\n f.write(fmt % tuple(rho[ix,iy].tolist()))\n\n\nif __name__ == '__main__':\n from pyscf import gto, scf\n from pyscf.tools import cubegen\n mol = gto.M(atom='H 0 0 0; H 0 0 1')\n mf = scf.RHF(mol)\n mf.scf()\n cubegen.density(mol, 'h2.cube', mf.make_rdm1())\n\n",
"import unittest\nimport numpy\nimport numpy as np\n\nfrom pyscf.pbc import gto as pgto\nimport pyscf.pbc.dft as pdft\nfrom pyscf.pbc.df import fft, aft, mdf\n\n\n\n\n##################################################\n#\n# port from ao2mo/eris.py\n#\n##################################################\nfrom pyscf import lib\nfrom pyscf.pbc import lib as pbclib\nfrom pyscf.pbc.dft.gen_grid import gen_uniform_grids\nfrom pyscf.pbc.dft.numint import eval_ao\nfrom pyscf.pbc import tools\n\neinsum = np.einsum\n\n\"\"\"\n (ij|kl) = \\int dr1 dr2 i*(r1) j(r1) v(r12) k*(r2) l(r2)\n = (ij|G) v(G) (G|kl)\n\n i*(r) j(r) = 1/N \\sum_G e^{iGr} (G|ij)\n = 1/N \\sum_G e^{-iGr} (ij|G)\n\n \"forward\" FFT:\n (G|ij) = \\sum_r e^{-iGr} i*(r) j(r) = fft[ i*(r) j(r) ]\n \"inverse\" FFT:\n (ij|G) = \\sum_r e^{iGr} i*(r) j(r) = N * ifft[ i*(r) j(r) ]\n = conj[ \\sum_r e^{-iGr} j*(r) i(r) ]\n\"\"\"\n\ndef general(cell, mo_coeffs, kpts=None, compact=0):\n '''pyscf-style wrapper to get MO 2-el integrals.'''\n assert len(mo_coeffs) == 4\n if kpts is not None:\n assert len(kpts) == 4\n return get_mo_eri(cell, mo_coeffs, kpts)\n\ndef get_mo_eri(cell, mo_coeffs, kpts=None):\n '''Convenience function to return MO 2-el integrals.'''\n mo_coeff12 = mo_coeffs[:2]\n mo_coeff34 = mo_coeffs[2:]\n if kpts is None:\n kpts12 = kpts34 = q = None\n else:\n kpts12 = kpts[:2]\n kpts34 = kpts[2:]\n q = kpts12[0] - kpts12[1]\n #q = kpts34[1] - kpts34[0]\n if q is None:\n q = np.zeros(3)\n\n mo_pairs12_kG = get_mo_pairs_G(cell, mo_coeff12, kpts12)\n mo_pairs34_invkG = get_mo_pairs_invG(cell, mo_coeff34, kpts34, q)\n return assemble_eri(cell, mo_pairs12_kG, mo_pairs34_invkG, q)\n\ndef get_mo_pairs_G(cell, mo_coeffs, kpts=None, q=None):\n '''Calculate forward (G|ij) FFT of all MO pairs.\n\n TODO: - Implement simplifications for real orbitals.\n\n Args:\n mo_coeff: length-2 list of (nao,nmo) ndarrays\n The two sets of MO coefficients to use in calculating the\n product |ij).\n\n Returns:\n mo_pairs_G : (ngs, 
nmoi*nmoj) ndarray\n The FFT of the real-space MO pairs.\n '''\n coords = gen_uniform_grids(cell)\n if kpts is None:\n q = np.zeros(3)\n aoR = eval_ao(cell, coords)\n ngs = aoR.shape[0]\n\n if np.array_equal(mo_coeffs[0], mo_coeffs[1]):\n nmoi = nmoj = mo_coeffs[0].shape[1]\n moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n else:\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])\n\n else:\n if q is None:\n q = kpts[1]-kpts[0]\n aoR_ki = eval_ao(cell, coords, kpt=kpts[0])\n aoR_kj = eval_ao(cell, coords, kpt=kpts[1])\n ngs = aoR_ki.shape[0]\n\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])\n\n #mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)\n mo_pairs_G = np.zeros([ngs,nmoi*nmoj], np.complex128)\n\n fac = np.exp(-1j*np.dot(coords, q))\n for i in xrange(nmoi):\n for j in xrange(nmoj):\n mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]\n mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R_ij, cell.gs, fac)\n\n return mo_pairs_G\n\ndef get_mo_pairs_invG(cell, mo_coeffs, kpts=None, q=None):\n '''Calculate \"inverse\" (ij|G) FFT of all MO pairs.\n\n TODO: - Implement simplifications for real orbitals.\n\n Args:\n mo_coeff: length-2 list of (nao,nmo) ndarrays\n The two sets of MO coefficients to use in calculating the\n product |ij).\n\n Returns:\n mo_pairs_invG : (ngs, nmoi*nmoj) ndarray\n The inverse FFTs of the real-space MO pairs.\n '''\n coords = gen_uniform_grids(cell)\n if kpts is None:\n q = np.zeros(3)\n aoR = eval_ao(cell, coords)\n ngs = aoR.shape[0]\n\n if np.array_equal(mo_coeffs[0], mo_coeffs[1]):\n nmoi = nmoj = mo_coeffs[0].shape[1]\n moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n else:\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', 
aoR, mo_coeffs[1])\n\n else:\n if q is None:\n q = kpts[1]-kpts[0]\n aoR_ki = eval_ao(cell, coords, kpt=kpts[0])\n aoR_kj = eval_ao(cell, coords, kpt=kpts[1])\n ngs = aoR_ki.shape[0]\n\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])\n\n #mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)\n mo_pairs_invG = np.zeros([ngs,nmoi*nmoj], np.complex128)\n\n fac = np.exp(1j*np.dot(coords, q))\n for i in xrange(nmoi):\n for j in xrange(nmoj):\n mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]\n mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R_ij), cell.gs, fac))\n\n return mo_pairs_invG\n\ndef get_mo_pairs_G_old(cell, mo_coeffs, kpts=None, q=None):\n '''Calculate forward (G|ij) and \"inverse\" (ij|G) FFT of all MO pairs.\n\n TODO: - Implement simplifications for real orbitals.\n\n Args:\n mo_coeff: length-2 list of (nao,nmo) ndarrays\n The two sets of MO coefficients to use in calculating the\n product |ij).\n\n Returns:\n mo_pairs_G, mo_pairs_invG : (ngs, nmoi*nmoj) ndarray\n The FFTs of the real-space MO pairs.\n '''\n coords = gen_uniform_grids(cell)\n if kpts is None:\n q = np.zeros(3)\n aoR = eval_ao(cell, coords)\n ngs = aoR.shape[0]\n\n if np.array_equal(mo_coeffs[0], mo_coeffs[1]):\n nmoi = nmoj = mo_coeffs[0].shape[1]\n moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n else:\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])\n\n else:\n if q is None:\n q = kpts[1]-kpts[0]\n aoR_ki = eval_ao(cell, coords, kpt=kpts[0])\n aoR_kj = eval_ao(cell, coords, kpt=kpts[1])\n ngs = aoR_ki.shape[0]\n\n nmoi = mo_coeffs[0].shape[1]\n nmoj = mo_coeffs[1].shape[1]\n moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])\n mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])\n\n mo_pairs_R = np.einsum('ri,rj->rij', np.conj(moiR), mojR)\n mo_pairs_G = 
np.zeros([ngs,nmoi*nmoj], np.complex128)\n mo_pairs_invG = np.zeros([ngs,nmoi*nmoj], np.complex128)\n\n fac = np.exp(-1j*np.dot(coords, q))\n for i in xrange(nmoi):\n for j in xrange(nmoj):\n mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R[:,i,j], cell.gs, fac)\n mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R[:,i,j]), cell.gs,\n fac.conj()))\n\n return mo_pairs_G, mo_pairs_invG\n\ndef assemble_eri(cell, orb_pair_invG1, orb_pair_G2, q=None):\n '''Assemble 4-index electron repulsion integrals.\n\n Returns:\n (nmo1*nmo2, nmo3*nmo4) ndarray\n\n '''\n if q is None:\n q = np.zeros(3)\n\n coulqG = tools.get_coulG(cell, -1.0*q)\n ngs = orb_pair_invG1.shape[0]\n Jorb_pair_G2 = np.einsum('g,gn->gn',coulqG,orb_pair_G2)*(cell.vol/ngs**2)\n eri = np.dot(orb_pair_invG1.T, Jorb_pair_G2)\n return eri\n\ndef get_ao_pairs_G(cell, kpt=np.zeros(3)):\n '''Calculate forward (G|ij) and \"inverse\" (ij|G) FFT of all AO pairs.\n\n Args:\n cell : instance of :class:`Cell`\n\n Returns:\n ao_pairs_G, ao_pairs_invG : (ngs, nao*(nao+1)/2) ndarray\n The FFTs of the real-space AO pairs.\n\n '''\n coords = gen_uniform_grids(cell)\n aoR = eval_ao(cell, coords, kpt) # shape = (coords, nao)\n ngs, nao = aoR.shape\n gamma_point = abs(kpt).sum() < 1e-9\n if gamma_point:\n npair = nao*(nao+1)//2\n ao_pairs_G = np.empty([ngs, npair], np.complex128)\n\n ij = 0\n for i in range(nao):\n for j in range(i+1):\n ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]\n ao_pairs_G[:,ij] = tools.fft(ao_ij_R, cell.gs)\n #ao_pairs_invG[:,ij] = ngs*tools.ifft(ao_ij_R, cell.gs)\n ij += 1\n ao_pairs_invG = ao_pairs_G.conj()\n else:\n ao_pairs_G = np.zeros([ngs, nao,nao], np.complex128)\n for i in range(nao):\n for j in range(nao):\n ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]\n ao_pairs_G[:,i,j] = tools.fft(ao_ij_R, cell.gs)\n ao_pairs_invG = ao_pairs_G.transpose(0,2,1).conj().reshape(-1,nao**2)\n ao_pairs_G = ao_pairs_G.reshape(-1,nao**2)\n return ao_pairs_G, ao_pairs_invG\n\ndef get_ao_eri(cell, kpt=np.zeros(3)):\n 
'''Convenience function to return AO 2-el integrals.'''\n\n ao_pairs_G, ao_pairs_invG = get_ao_pairs_G(cell, kpt)\n eri = assemble_eri(cell, ao_pairs_invG, ao_pairs_G)\n if abs(kpt).sum() < 1e-9:\n eri = eri.real\n return eri\n\n##################################################\n#\n# ao2mo/eris.py end\n#\n##################################################\n\n\n\n\ncell = pgto.Cell()\ncell.atom = 'He 1. .5 .5; C .1 1.3 2.1'\ncell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],\n 'C' :'gth-szv',}\ncell.pseudo = {'C':'gth-pade'}\ncell.a = np.eye(3) * 2.5\ncell.gs = [10] * 3\ncell.build()\nnp.random.seed(1)\nkpts = np.random.random((4,3))\nkpts[3] = kpts[0]-kpts[1]+kpts[2]\nkpt0 = np.zeros(3)\n\ncell1 = pgto.Cell()\ncell1.atom = 'He 1. .5 .5; He .1 1.3 2.1'\ncell1.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}\ncell1.a = np.eye(3) * 2.5\ncell1.gs = [10] * 3\ncell1.build()\nkdf0 = mdf.MDF(cell1)\nkdf0.kpts = kpts\n\n\ndef finger(a):\n w = np.cos(np.arange(a.size))\n return np.dot(w, a.ravel())\n\nclass KnowValues(unittest.TestCase):\n def test_aft_get_nuc(self):\n df = aft.AFTDF(cell)\n v1 = df.get_nuc(kpts[0])\n self.assertAlmostEqual(finger(v1), (-6.0893491060887159+0.19823828749533859j), 8)\n\n def test_aft_get_pp(self):\n v0 = pgto.pseudo.get_pp(cell, kpts[0])\n v1 = aft.AFTDF(cell).get_pp(kpts)\n self.assertTrue(np.allclose(v0, v1[0], atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(v1[0]), (-5.6240305085898807+0.22094834207603817j), 8)\n\n v0 = pgto.pseudo.get_pp(cell, kpts[1])\n self.assertTrue(np.allclose(v0, v1[1], atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(v1[1]), (-5.53877585793+1.043933371359j) ,8)\n self.assertAlmostEqual(finger(v1[2]), (-6.05309558678+0.281728966073j), 8)\n self.assertAlmostEqual(finger(v1[3]), (-5.60115995450+0.275973062529j), 8)\n\n def test_aft_get_ao_eri(self):\n df0 = fft.FFTDF(cell)\n df = aft.AFTDF(cell)\n eri0 = df0.get_ao_eri(compact=True)\n eri1 = df.get_ao_eri(compact=True)\n self.assertTrue(np.allclose(eri0, 
eri1, atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(eri1), 0.80425361966560172, 8)\n\n eri0 = df0.get_ao_eri(kpts[0])\n eri1 = df.get_ao_eri(kpts[0])\n self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(eri1), (2.9346374476387949-0.20479054936779137j), 8)\n\n eri0 = df0.get_ao_eri(kpts)\n eri1 = df.get_ao_eri(kpts)\n self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))\n self.assertAlmostEqual(finger(eri1), (0.33709287302019619-0.94185725020966538j), 8)\n\n def test_get_eri_gamma(self):\n odf0 = mdf.MDF(cell1)\n odf = aft.AFTDF(cell1)\n ref = odf0.get_eri()\n eri0000 = odf.get_eri(compact=True)\n self.assertTrue(eri0000.dtype == numpy.double)\n self.assertTrue(np.allclose(eri0000, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri0000), 0.23714016293926865, 9)\n\n ref = kdf0.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))\n eri1111 = odf.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)\n\n eri1111 = odf.get_eri((kpts[0]+1e-8,kpts[0]+1e-8,kpts[0],kpts[0]))\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)\n\n def test_get_eri_0011(self):\n odf = aft.AFTDF(cell1)\n ref = kdf0.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))\n eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))\n self.assertTrue(np.allclose(eri0011, ref, atol=1e-3, rtol=1e-3))\n self.assertAlmostEqual(finger(eri0011), (1.2410162858084512+0.00074485383749912936j), 9)\n\n ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[0],kpts[1],kpts[1]))\n eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))\n self.assertTrue(np.allclose(eri0011, ref, atol=1e-9, rtol=1e-9))\n self.assertAlmostEqual(finger(eri0011), (1.2410162860852818+0.00074485383748954838j), 
9)\n\n def test_get_eri_0110(self):\n odf = aft.AFTDF(cell1)\n ref = kdf0.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))\n eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))\n eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))\n self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)\n\n ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[1],kpts[1],kpts[0]))\n eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))\n self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)\n eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))\n self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))\n self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)\n\n def test_get_eri_0123(self):\n odf = aft.AFTDF(cell1)\n ref = kdf0.get_eri(kpts)\n eri1111 = odf.get_eri(kpts)\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-8, rtol=1e-8))\n self.assertAlmostEqual(finger(eri1111), (1.2917759427391706-0.013340252488069412j), 9)\n\n ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, kpts)\n self.assertTrue(np.allclose(eri1111, ref, atol=1e-8, rtol=1e-8))\n\n def test_get_mo_eri(self):\n df0 = fft.FFTDF(cell)\n odf = aft.AFTDF(cell)\n nao = cell.nao_nr()\n numpy.random.seed(5)\n mo =(numpy.random.random((nao,nao)) +\n numpy.random.random((nao,nao))*1j)\n eri_mo0 = df0.get_mo_eri((mo,)*4, kpts)\n eri_mo1 = odf.get_mo_eri((mo,)*4, kpts)\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n kpts_t = (kpts[2],kpts[3],kpts[0],kpts[1])\n eri_mo2 = df0.get_mo_eri((mo,)*4, kpts_t)\n eri_mo2 = eri_mo2.reshape((nao,)*4).transpose(2,3,0,1).reshape(nao**2,-1)\n self.assertTrue(np.allclose(eri_mo2, eri_mo0, 
atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],)*4)\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],)*4)\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))\n eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n mo1 = mo[:,:nao//2+1]\n eri_mo0 = df0.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)\n eri_mo1 = odf.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n eri_mo0 = df0.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))\n eri_mo1 = odf.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))\n self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))\n\n\nif __name__ == '__main__':\n print(\"Full Tests for aft\")\n unittest.main()\n\n",
"#\n# Author: Qiming Sun <[email protected]>\n#\n\nfrom functools import reduce\nimport numpy\nimport scipy.linalg\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.symm import basis\nfrom pyscf.symm import param\n\n\ndef label_orb_symm(mol, irrep_name, symm_orb, mo, s=None, check=True, tol=1e-9):\n '''Label the symmetry of given orbitals\n\n irrep_name can be either the symbol or the ID of the irreducible\n representation. If the ID is provided, it returns the numeric code\n associated with XOR operator, see :py:meth:`symm.param.IRREP_ID_TABLE`\n\n Args:\n mol : an instance of :class:`Mole`\n\n irrep_name : list of str or int\n A list of irrep ID or name, it can be either mol.irrep_id or\n mol.irrep_name. It can affect the return \"label\".\n symm_orb : list of 2d array\n the symmetry adapted basis\n mo : 2d array\n the orbitals to label\n\n Returns:\n list of symbols or integers to represent the irreps for the given\n orbitals\n\n Examples:\n\n >>> from pyscf import gto, scf, symm\n >>> mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='ccpvdz',verbose=0, symmetry=1)\n >>> mf = scf.RHF(mol)\n >>> mf.kernel()\n >>> symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff)\n ['Ag', 'B1u', 'Ag', 'B1u', 'B2u', 'B3u', 'Ag', 'B2g', 'B3g', 'B1u']\n >>> symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, mf.mo_coeff)\n [0, 5, 0, 5, 6, 7, 0, 2, 3, 5]\n '''\n nmo = mo.shape[1]\n if s is None:\n s = mol.intor_symmetric('cint1e_ovlp_sph')\n mo_s = numpy.dot(mo.T, s)\n norm = numpy.zeros((len(irrep_name), nmo))\n for i, csym in enumerate(symm_orb):\n moso = numpy.dot(mo_s, csym)\n ovlpso = reduce(numpy.dot, (csym.T, s, csym))\n try:\n norm[i] = numpy.einsum('ik,ki->i', moso, lib.cho_solve(ovlpso, moso.T))\n except:\n ovlpso[numpy.diag_indices(csym.shape[1])] += 1e-12\n norm[i] = numpy.einsum('ik,ki->i', moso, lib.cho_solve(ovlpso, moso.T))\n norm /= numpy.sum(norm, axis=0) # for orbitals which are not normalized\n iridx = numpy.argmax(norm, axis=0)\n orbsym 
= numpy.asarray([irrep_name[i] for i in iridx])\n logger.debug(mol, 'irreps of each MO %s', orbsym)\n if check:\n largest_norm = norm[iridx,numpy.arange(nmo)]\n orbidx = numpy.where(largest_norm < 1-tol)[0]\n if orbidx.size > 0:\n idx = numpy.where(largest_norm < 1-tol*1e2)[0]\n if idx.size > 0:\n raise ValueError('orbitals %s not symmetrized, norm = %s' %\n (idx, largest_norm[idx]))\n else:\n logger.warn(mol, 'orbitals %s not strictly symmetrized.',\n numpy.unique(orbidx))\n logger.warn(mol, 'They can be symmetrized with '\n 'pyscf.symm.symmetrize_space function.')\n logger.debug(mol, 'norm = %s', largest_norm[orbidx])\n return orbsym\n\ndef symmetrize_orb(mol, mo, orbsym=None, s=None, check=False):\n '''Symmetrize the given orbitals.\n\n This function is different to the :func:`symmetrize_space`: In this\n function, each orbital is symmetrized by removing non-symmetric components.\n :func:`symmetrize_space` symmetrizes the entire space by mixing different\n orbitals.\n\n Note this function might return non-orthorgonal orbitals.\n Call :func:`symmetrize_space` to find the symmetrized orbitals that are\n close to the given orbitals.\n\n Args:\n mo : 2D float array\n The orbital space to symmetrize\n\n Kwargs:\n orbsym : integer list\n Irrep id for each orbital. If not given, the irreps are guessed\n by calling :func:`label_orb_symm`.\n s : 2D float array\n Overlap matrix. If given, use this overlap than the the overlap\n of the input mol.\n\n Returns:\n 2D orbital coefficients\n\n Examples:\n\n >>> from pyscf import gto, symm, scf\n >>> mol = gto.M(atom = 'C 0 0 0; H 1 1 1; H -1 -1 1; H 1 -1 -1; H -1 1 -1',\n ... 
basis = 'sto3g')\n >>> mf = scf.RHF(mol).run()\n >>> mol.build(0, 0, symmetry='D2')\n >>> mo = symm.symmetrize_orb(mol, mf.mo_coeff)\n >>> print(symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo))\n ['A', 'A', 'B1', 'B2', 'B3', 'A', 'B1', 'B2', 'B3']\n '''\n if s is None:\n s = mol.intor_symmetric('cint1e_ovlp_sph')\n if orbsym is None:\n orbsym = label_orb_symm(mol, mol.irrep_id, mol.symm_orb,\n mo, s=s, check=check)\n orbsym = numpy.asarray(orbsym)\n mo_s = numpy.dot(mo.T, s)\n mo1 = numpy.empty_like(mo)\n\n if orbsym[0] in mol.irrep_name:\n irrep_id = mol.irrep_name\n else:\n irrep_id = mol.irrep_id\n\n for i, ir in enumerate(irrep_id):\n idx = orbsym == ir\n csym = mol.symm_orb[i]\n ovlpso = reduce(numpy.dot, (csym.T, s, csym))\n sc = lib.cho_solve(ovlpso, numpy.dot(mo_s[idx], csym).T)\n mo1[:,idx] = numpy.dot(csym, sc)\n return mo1\n\ndef symmetrize_space(mol, mo, s=None, check=True):\n '''Symmetrize the given orbital space.\n\n This function is different to the :func:`symmetrize_orb`: In this function,\n the given orbitals are mixed to reveal the symmtery; :func:`symmetrize_orb`\n projects out non-symmetric components for each orbital.\n\n Args:\n mo : 2D float array\n The orbital space to symmetrize\n\n Kwargs:\n s : 2D float array\n Overlap matrix. If not given, overlap is computed with the input mol.\n\n Returns:\n 2D orbital coefficients\n\n Examples:\n\n >>> from pyscf import gto, symm, scf\n >>> mol = gto.M(atom = 'C 0 0 0; H 1 1 1; H -1 -1 1; H 1 -1 -1; H -1 1 -1',\n ... 
basis = 'sto3g')\n >>> mf = scf.RHF(mol).run()\n >>> mol.build(0, 0, symmetry='D2')\n >>> mo = symm.symmetrize_space(mol, mf.mo_coeff)\n >>> print(symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo))\n ['A', 'A', 'A', 'B1', 'B1', 'B2', 'B2', 'B3', 'B3']\n '''\n from pyscf.tools import mo_mapping\n if s is None:\n s = mol.intor_symmetric('cint1e_ovlp_sph')\n nmo = mo.shape[1]\n mo_s = numpy.dot(mo.T, s)\n if check:\n assert(numpy.allclose(numpy.dot(mo_s, mo), numpy.eye(nmo)))\n mo1 = []\n for i, csym in enumerate(mol.symm_orb):\n moso = numpy.dot(mo_s, csym)\n ovlpso = reduce(numpy.dot, (csym.T, s, csym))\n\n# excluding orbitals which are already symmetrized\n try:\n diag = numpy.einsum('ik,ki->i', moso, lib.cho_solve(ovlpso, moso.T))\n except:\n ovlpso[numpy.diag_indices(csym.shape[1])] += 1e-12\n diag = numpy.einsum('ik,ki->i', moso, lib.cho_solve(ovlpso, moso.T))\n idx = abs(1-diag) < 1e-8\n orb_exclude = mo[:,idx]\n mo1.append(orb_exclude)\n moso1 = moso[~idx]\n dm = numpy.dot(moso1.T, moso1)\n\n if dm.trace() > 1e-8:\n e, u = scipy.linalg.eigh(dm, ovlpso)\n mo1.append(numpy.dot(csym, u[:,abs(1-e) < 1e-6]))\n mo1 = numpy.hstack(mo1)\n if mo1.shape[1] != nmo:\n raise ValueError('The input orbital space is not symmetrized.\\n It is '\n 'probably because the input mol and orbitals are of '\n 'different orientation.')\n snorm = numpy.linalg.norm(reduce(numpy.dot, (mo1.T, s, mo1)) - numpy.eye(nmo))\n if check and snorm > 1e-6:\n raise ValueError('Orbitals are not orthogonalized')\n idx = mo_mapping.mo_1to1map(reduce(numpy.dot, (mo.T, s, mo1)))\n return mo1[:,idx]\n\ndef std_symb(gpname):\n '''std_symb('d2h') returns D2h; std_symb('D2H') returns D2h'''\n return gpname[0].upper() + gpname[1:].lower()\n\ndef irrep_name2id(gpname, symb):\n '''Convert the irrep symbol to internal irrep ID\n\n Args:\n gpname : str\n The point group symbol\n symb : str\n Irrep symbol\n\n Returns:\n Irrep ID, int\n '''\n gpname = std_symb(gpname)\n symb = std_symb(symb)\n if gpname in 
('Dooh', 'Coov'):\n return basis.linearmole_irrep_symb2id(gpname, symb)\n else:\n return param.IRREP_ID_TABLE[gpname][symb]\n\ndef irrep_id2name(gpname, irrep_id):\n '''Convert the internal irrep ID to irrep symbol\n\n Args:\n gpname : str\n The point group symbol\n irrep_id : int\n See IRREP_ID_TABLE in pyscf/symm/param.py\n\n Returns:\n Irrep sybmol, str\n '''\n gpname = std_symb(gpname)\n if gpname in ('Dooh', 'Coov'):\n return basis.linearmole_irrep_id2symb(gpname, irrep_id)\n else:\n return param.CHARACTER_TABLE[gpname][irrep_id][0]\n\ndef irrep_name(pgname, irrep_id):\n raise RuntimeError('This function was obsoleted. Use irrep_id2name')\n\ndef route(target, nelec, orbsym):\n '''Pick orbitals to form a determinant which has the right symmetry.\n If solution is not found, return []\n '''\n def riter(target, nelec, orbsym):\n if nelec == 1:\n if target in orbsym:\n return [orbsym.index(target)]\n else:\n return []\n else:\n for i, ir in enumerate(orbsym):\n off = i + 1\n orb_left = orbsym[off:]\n res = riter(target^ir, nelec-1, orb_left)\n if res:\n return [i] + [off+x for x in res]\n return []\n if isinstance(orbsym, numpy.ndarray):\n orbsym = orbsym.tolist()\n return riter(target, nelec, orbsym)\n\ndef eigh(h, orbsym):\n '''Solve eigenvalue problem based on the symmetry information for basis.\n See also pyscf/lib/linalg_helper.py :func:`eigh_by_blocks`\n\n Examples:\n\n >>> from pyscf import gto, symm\n >>> mol = gto.M(atom='H 0 0 0; H 0 0 1', basis='ccpvdz', symmetry=True)\n >>> c = numpy.hstack(mol.symm_orb)\n >>> vnuc_so = reduce(numpy.dot, (c.T, mol.intor('cint1e_nuc_sph'), c))\n >>> orbsym = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, c)\n >>> symm.eigh(vnuc_so, orbsym)\n (array([-4.50766885, -1.80666351, -1.7808565 , -1.7808565 , -1.74189134,\n -0.98998583, -0.98998583, -0.40322226, -0.30242374, -0.07608981]),\n ...)\n '''\n return lib.eigh_by_blocks(h, labels=orbsym)\n\nif __name__ == \"__main__\":\n from pyscf import gto\n from pyscf import 
scf\n mol = gto.Mole()\n mol.build(\n atom = [['H', (0,0,0)], ['H', (0,0,1)]],\n basis = {'H': 'cc-pvdz'},\n symmetry = 1\n )\n mf = scf.RHF(mol)\n mf.scf()\n\n nao, nmo = mf.mo_coeff.shape\n print(label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff))\n numpy.random.seed(1)\n u = numpy.random.random((nmo,nmo))*1e-2\n u = scipy.linalg.expm(u - u.T)\n mo = symmetrize_orb(mol, numpy.dot(mf.mo_coeff, u))\n print(label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mo))\n\n orbsym = [0, 3, 0, 2, 5, 6]\n res = route(7, 3, orbsym)\n print(res, reduce(lambda x,y:x^y, [orbsym[i] for i in res]))\n",
"#!/usr/bin/env python\n\n\nimport time\nfrom functools import reduce\nimport copy\nimport numpy\nimport scipy.linalg\nfrom pyscf import lib\nfrom pyscf.gto import mole\nfrom pyscf.lib import logger\nfrom pyscf.scf import x2c\nfrom pyscf.pbc import gto as pbcgto\nfrom pyscf.pbc.df import aft\nfrom pyscf.pbc.df import aft_jk\nfrom pyscf.pbc.df import ft_ao\n\n\ndef sfx2c1e(mf):\n '''Spin-free X2C.\n For the given SCF object, update the hcore constructor.\n\n Args:\n mf : an SCF object\n\n Returns:\n An SCF object\n\n Examples:\n\n >>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz', verbose=0)\n >>> mf = scf.sfx2c1e(scf.RHF(mol))\n >>> mf.scf()\n\n >>> mol.symmetry = 1\n >>> mol.build(0, 0)\n >>> mf = scf.sfx2c1e(scf.UHF(mol))\n >>> mf.scf()\n '''\n mf_class = mf.__class__\n if mf_class.__doc__ is None:\n doc = ''\n else:\n doc = mf_class.__doc__\n class X2C_HF(mf_class):\n __doc__ = doc + \\\n '''\n Attributes for spin-free X2C:\n with_x2c : X2C object\n '''\n def __init__(self):\n self.with_x2c = SpinFreeX2C(mf.mol)\n self.__dict__.update(mf.__dict__)\n self._keys = self._keys.union(['with_x2c'])\n\n def get_hcore(self, cell=None, kpts=None, kpt=None):\n if cell is None: cell = self.cell\n if kpts is None:\n if hasattr(self, 'kpts'):\n kpts = self.kpts\n else:\n if kpt is None:\n kpts = self.kpt\n else:\n kpts = kpt\n if self.with_x2c:\n return self.with_x2c.get_hcore(cell, kpts)\n else:\n return mf_class.get_hcore(self, cell, kpts)\n\n return X2C_HF()\n\nsfx2c = sfx2c1e\n\nclass X2C(x2c.X2C):\n def __init__(self, cell, kpts=None):\n self.exp_drop = 0.2\n self.approx = 'atom1e'\n self.xuncontract = True\n self.basis = None\n self.cell = self.mol = cell\n\nclass SpinFreeX2C(X2C):\n def get_hcore(self, cell=None, kpts=None):\n if cell is None: cell = self.cell\n if kpts is None:\n kpts_lst = numpy.zeros((1,3))\n else:\n kpts_lst = numpy.reshape(kpts, (-1,3))\n\n xcell, contr_coeff = self.get_xmol(cell)\n with_df = aft.AFTDF(xcell)\n c = lib.param.LIGHT_SPEED\n 
assert('1E' in self.approx.upper())\n if 'ATOM' in self.approx.upper():\n atom_slices = xcell.offset_nr_by_atom()\n nao = xcell.nao_nr()\n x = numpy.zeros((nao,nao))\n vloc = numpy.zeros((nao,nao))\n wloc = numpy.zeros((nao,nao))\n for ia in range(xcell.natm):\n ish0, ish1, p0, p1 = atom_slices[ia]\n shls_slice = (ish0, ish1, ish0, ish1)\n t1 = xcell.intor('cint1e_kin_sph', shls_slice=shls_slice)\n v1 = xcell.intor('cint1e_nuc_sph', shls_slice=shls_slice)\n s1 = xcell.intor('cint1e_ovlp_sph', shls_slice=shls_slice)\n w1 = xcell.intor('cint1e_pnucp_sph', shls_slice=shls_slice)\n vloc[p0:p1,p0:p1] = v1\n wloc[p0:p1,p0:p1] = w1\n x[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)\n else:\n raise NotImplementedError\n\n t = xcell.pbc_intor('cint1e_kin_sph', 1, lib.HERMITIAN, kpts_lst)\n s = xcell.pbc_intor('cint1e_ovlp_sph', 1, lib.HERMITIAN, kpts_lst)\n v = with_df.get_nuc(kpts_lst)\n #w = get_pnucp(with_df, kpts_lst)\n if self.basis is not None:\n s22 = s\n s21 = pbcgto.intor_cross('cint1e_ovlp_sph', xcell, cell, kpts=kpts_lst)\n\n h1_kpts = []\n for k in range(len(kpts_lst)):\n #h1 = x2c._get_hcore_fw(t[k], vloc, wloc, s[k], x, c) - vloc + v[k]\n #h1 = x2c._get_hcore_fw(t[k], v[k], w[k], s[k], x, c)\n h1 = x2c._get_hcore_fw(t[k], v[k], wloc, s[k], x, c)\n if self.basis is not None:\n c = lib.cho_solve(s22[k], s21[k])\n h1 = reduce(numpy.dot, (c.T, h1, c))\n if self.xuncontract and contr_coeff is not None:\n h1 = reduce(numpy.dot, (contr_coeff.T, h1, contr_coeff))\n h1_kpts.append(h1)\n\n if kpts is None or numpy.shape(kpts) == (3,):\n h1_kpts = h1_kpts[0]\n return lib.asarray(h1_kpts)\n\n\n# We still use Ewald-like technique to compute spVsp.\n# Theoratically, spVsp is not divergent because the numeriator spsp and the\n# denorminator in Coulomb kernel 4pi/G^2 are cancelled. A real space lattice\n# sum can converge to a finite value. 
However, it's difficult to accurately\n# converge this value, large number of images in lattice summation is still\n# required.\ndef get_pnucp(mydf, kpts=None):\n cell = mydf.cell\n if kpts is None:\n kpts_lst = numpy.zeros((1,3))\n else:\n kpts_lst = numpy.reshape(kpts, (-1,3))\n\n log = logger.Logger(mydf.stdout, mydf.verbose)\n t1 = t0 = (time.clock(), time.time())\n\n nkpts = len(kpts_lst)\n nao = cell.nao_nr()\n nao_pair = nao * (nao+1) // 2\n\n Gv, Gvbase, kws = cell.get_Gv_weights(mydf.gs)\n kpt_allow = numpy.zeros(3)\n if mydf.eta == 0:\n charge = -cell.atom_charges()\n #coulG=4*numpy.pi/G^2 is cancelled with (sigma dot p i, sigma dot p j)\n SI = cell.get_SI(Gv)\n vGR = numpy.einsum('i,ix->x', 4*numpy.pi*charge, SI.real) * kws\n vGI = numpy.einsum('i,ix->x', 4*numpy.pi*charge, SI.imag) * kws\n wjR = numpy.zeros((nkpts,nao_pair))\n wjI = numpy.zeros((nkpts,nao_pair))\n else:\n nuccell = copy.copy(cell)\n half_sph_norm = .5/numpy.sqrt(numpy.pi)\n norm = half_sph_norm/mole._gaussian_int(2, mydf.eta)\n chg_env = [mydf.eta, norm]\n ptr_eta = cell._env.size\n ptr_norm = ptr_eta + 1\n chg_bas = [[ia, 0, 1, 1, 0, ptr_eta, ptr_norm, 0] for ia in range(cell.natm)]\n nuccell._atm = cell._atm\n nuccell._bas = numpy.asarray(chg_bas, dtype=numpy.int32)\n nuccell._env = numpy.hstack((cell._env, chg_env))\n\n wj = lib.asarray(mydf._int_nuc_vloc(nuccell, kpts_lst, 'cint3c2e_pvp1_sph'))\n wjR = wj.real\n wjI = wj.imag\n t1 = log.timer_debug1('pnucp pass1: analytic int', *t1)\n\n charge = -cell.atom_charges()\n #coulG=4*numpy.pi/G^2 is cancelled with (sigma dot p i, sigma dot p j)\n aoaux = ft_ao.ft_ao(nuccell, Gv)\n vGR = numpy.einsum('i,xi->x', 4*numpy.pi*charge, aoaux.real) * kws\n vGI = numpy.einsum('i,xi->x', 4*numpy.pi*charge, aoaux.imag) * kws\n\n max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])\n for k, pqkR, pqkI, p0, p1 \\\n in mydf.ft_loop(mydf.gs, kpt_allow, kpts_lst,\n max_memory=max_memory, aosym='s2'):\n# rho_ij(G) nuc(-G) / G^2\n# = 
[Re(rho_ij(G)) + Im(rho_ij(G))*1j] [Re(nuc(G)) - Im(nuc(G))*1j] / G^2\n if not aft_jk.gamma_point(kpts_lst[k]):\n wjI[k] += numpy.einsum('k,xk->x', vGR[p0:p1], pqkI)\n wjI[k] -= numpy.einsum('k,xk->x', vGI[p0:p1], pqkR)\n wjR[k] += numpy.einsum('k,xk->x', vGR[p0:p1], pqkR)\n wjR[k] += numpy.einsum('k,xk->x', vGI[p0:p1], pqkI)\n t1 = log.timer_debug1('contracting Vnuc', *t1)\n\n if mydf.eta != 0 and cell.dimension == 3:\n nucbar = sum([z/nuccell.bas_exp(i)[0] for i,z in enumerate(charge)])\n nucbar *= numpy.pi/cell.vol * 2\n ovlp = cell.pbc_intor('cint1e_kin_sph', 1, lib.HERMITIAN, kpts_lst)\n for k in range(nkpts):\n s = lib.pack_tril(ovlp[k])\n wjR[k] -= nucbar * s.real\n wjI[k] -= nucbar * s.imag\n\n wj = []\n for k, kpt in enumerate(kpts_lst):\n if aft_jk.gamma_point(kpt):\n wj.append(lib.unpack_tril(wjR[k]))\n else:\n wj.append(lib.unpack_tril(wjR[k]+wjI[k]*1j))\n\n if kpts is None or numpy.shape(kpts) == (3,):\n wj = wj[0]\n return wj\n\n\nif __name__ == '__main__':\n from pyscf.pbc import scf\n cell = pbcgto.Cell()\n cell.build(unit = 'B',\n a = numpy.eye(3)*4,\n gs = [5]*3,\n atom = 'H 0 0 0; H 0 0 1.8',\n verbose = 4,\n basis='sto3g')\n lib.param.LIGHT_SPEED = 2\n mf = scf.RHF(cell)\n mf.with_df = aft.AFTDF(cell)\n enr = mf.kernel()\n print('E(NR) = %.12g' % enr)\n\n mf = sfx2c1e(mf)\n esfx2c = mf.kernel()\n print('E(SFX2C1E) = %.12g' % esfx2c)\n\n mf = scf.KRHF(cell)\n mf.with_df = aft.AFTDF(cell)\n mf.kpts = cell.make_kpts([2,2,1])\n enr = mf.kernel()\n print('E(k-NR) = %.12g' % enr)\n\n mf = sfx2c1e(mf)\n esfx2c = mf.kernel()\n print('E(k-SFX2C1E) = %.12g' % esfx2c)\n",
"#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport numpy\nimport unittest\nfrom pyscf import gto\nfrom pyscf import scf\n\nmol = gto.M(\n verbose = 5,\n output = '/dev/null',\n atom = '''\nO 0 0 0\nH 0 -0.757 0.587\nH 0 0.757 0.587''',\n basis = 'cc-pvdz',\n)\n\nmf = scf.RHF(mol)\nmf.conv_tol = 1e-10\nmf.kernel()\n\nn2sym = gto.M(\n verbose = 7,\n output = '/dev/null',\n atom = '''\n N 0 0 0\n N 0 0 1''',\n symmetry = 1,\n basis = 'cc-pvdz')\nn2mf = scf.RHF(n2sym).set(conv_tol=1e-10).run()\n\n\nclass KnowValues(unittest.TestCase):\n def test_init_guess_minao(self):\n dm = scf.hf.get_init_guess(mol, key='minao')\n self.assertAlmostEqual(abs(dm).sum(), 13.649710173723346, 9)\n\n def test_1e(self):\n mf = scf.hf.HF1e(mol)\n self.assertAlmostEqual(mf.scf(), -23.867818585778764, 9)\n\n def test_1e_symm(self):\n molsym = gto.M(\n atom = '''\n O 0 0 0\n H 0 -0.757 0.587\n H 0 0.757 0.587''',\n basis = 'cc-pvdz',\n symmetry = 1,\n )\n mf = scf.hf_symm.HF1e(molsym)\n self.assertAlmostEqual(mf.scf(), -23.867818585778764, 9)\n\n def test_energy_tot(self):\n numpy.random.seed(1)\n nao = mol.nao_nr()\n dm = numpy.random.random((nao,nao))\n e = mf.energy_elec(dm)[0]\n self.assertAlmostEqual(e, -59.332199154299914, 9)\n\n def test_mulliken_pop(self):\n numpy.random.seed(1)\n nao = mol.nao_nr()\n dm = numpy.random.random((nao,nao))\n pop, chg = mf.mulliken_pop(mol, dm)\n self.assertAlmostEqual(abs(pop).sum(), 22.941032799355845, 7)\n pop, chg = mf.mulliken_pop_meta_lowdin_ao(mol, dm, pre_orth_method='ano')\n self.assertAlmostEqual(abs(pop).sum(), 22.056441149586863, 7)\n pop, chg = mf.mulliken_pop_meta_lowdin_ao(mol, dm, pre_orth_method='minao')\n self.assertAlmostEqual(abs(pop).sum(), 22.118254161380747, 7)\n pop, chg = mf.mulliken_pop_meta_lowdin_ao(mol, dm, pre_orth_method='scf')\n self.assertAlmostEqual(abs(pop).sum(), 22.117869619510266, 7)\n\n def test_analyze(self):\n numpy.random.seed(1)\n nao = mol.nao_nr()\n mo = numpy.random.random((nao,nao))\n popandchg, dip = 
mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(popandchg[0]), 4.0048449691540391, 6)\n self.assertAlmostEqual(numpy.linalg.norm(dip), 2.05844441822, 8)\n\n def test_scf(self):\n self.assertAlmostEqual(mf.e_tot, -76.026765673119627, 9)\n\n def test_nr_rohf(self):\n pmol = mol.copy()\n pmol.charge = 1\n pmol.spin = 1\n pmol.build(False, False)\n mf = scf.rohf.ROHF(pmol)\n self.assertAlmostEqual(mf.scf(), -75.627354109594179, 9)\n\n def test_damping(self):\n nao = mol.nao_nr()\n numpy.random.seed(1)\n s = scf.hf.get_ovlp(mol)\n d = numpy.random.random((nao,nao))\n d = d + d.T\n f = scf.hf.damping(s, d, scf.hf.get_hcore(mol), .5)\n self.assertAlmostEqual(numpy.linalg.norm(f), 23361.854064083178, 9)\n\n def test_level_shift(self):\n nao = mol.nao_nr()\n numpy.random.seed(1)\n s = scf.hf.get_ovlp(mol)\n d = numpy.random.random((nao,nao))\n d = d + d.T\n f = scf.hf.level_shift(s, d, scf.hf.get_hcore(mol), .5)\n self.assertAlmostEqual(numpy.linalg.norm(f), 94.230157719053565, 9)\n\n def test_get_veff(self):\n nao = mol.nao_nr()\n numpy.random.seed(1)\n d1 = numpy.random.random((nao,nao))\n d2 = numpy.random.random((nao,nao))\n d = (d1+d1.T, d2+d2.T)\n v = scf.hf.get_veff(mol, d)\n self.assertAlmostEqual(numpy.linalg.norm(v), 199.66041114502335, 9)\n\n def test_hf_symm(self):\n pmol = mol.copy()\n pmol.symmetry = 1\n pmol.build(False, False)\n mf = scf.hf_symm.RHF(pmol)\n self.assertAlmostEqual(mf.scf(), -76.026765673119627, 9)\n pop, chg = mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(pop), 4.0048449691540391, 6)\n\n def test_hf_symm_fixnocc(self):\n pmol = mol.copy()\n pmol.symmetry = 1\n pmol.build(False, False)\n mf = scf.hf_symm.RHF(pmol)\n mf.irrep_nelec = {'B1':4}\n self.assertAlmostEqual(mf.scf(), -75.074736446470723, 9)\n pop, chg = mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(pop), 3.9778759898704612, 6)\n\n def test_hf_symm_rohf(self):\n pmol = mol.copy()\n pmol.symmetry = 1\n pmol.charge = 1\n pmol.spin = 1\n pmol.build(False, 
False)\n mf = scf.hf_symm.ROHF(pmol)\n self.assertAlmostEqual(mf.scf(), -75.627354109594179, 9)\n pop, chg = mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(pop), 3.6782452972117743, 6)\n\n def test_hf_symm_rohf_fixnocc(self):\n pmol = mol.copy()\n pmol.symmetry = 1\n pmol.charge = 1\n pmol.spin = 1\n pmol.build(False, False)\n mf = scf.hf_symm.ROHF(pmol)\n mf.irrep_nelec = {'B1':(2,1)}\n self.assertAlmostEqual(mf.scf(), -75.008317646307404, 9)\n pop, chg = mf.analyze()\n self.assertAlmostEqual(numpy.linalg.norm(pop), 3.7873076011029529, 6)\n\n def test_n2_symm(self):\n mf = scf.hf_symm.RHF(n2sym)\n self.assertAlmostEqual(mf.scf(), -108.9298383856092, 9)\n\n def test_n2_symm_rohf(self):\n pmol = n2sym.copy()\n pmol.charge = 1\n pmol.spin = 1\n mf = scf.hf_symm.ROHF(pmol)\n self.assertAlmostEqual(mf.scf(), -108.33899076078299, 9)\n\n def test_n2_symm_fixnocc(self):\n mf = scf.hf_symm.RHF(n2sym)\n mf.irrep_nelec = {'A1g':8, 'A1u':2, 'E1ux':2, 'E1uy':2}\n self.assertAlmostEqual(mf.scf(), -106.52905502298771, 9)\n\n def test_n2_symm_rohf_fixnocc(self):\n pmol = n2sym.copy()\n pmol.charge = 1\n pmol.spin = 1\n mf = scf.hf_symm.ROHF(pmol)\n mf.irrep_nelec = {'A1g':6, 'A1u':3, 'E1ux':2, 'E1uy':2}\n self.assertAlmostEqual(mf.scf(), -108.21954550790898, 9)\n\n def test_dot_eri_dm(self):\n numpy.random.seed(1)\n nao = mol.nao_nr()\n dm = numpy.random.random((nao,nao))\n j0, k0 = scf.hf.dot_eri_dm(mf._eri, dm+dm.T, hermi=0)\n j1, k1 = scf.hf.dot_eri_dm(mf._eri, dm+dm.T, hermi=1)\n self.assertTrue(numpy.allclose(j0,j1))\n self.assertTrue(numpy.allclose(k0,k1))\n j1, k1 = scf.hf.dot_eri_dm(mf._eri, dm, hermi=0)\n self.assertAlmostEqual(numpy.linalg.norm(j1), 77.035779188661465, 9)\n self.assertAlmostEqual(numpy.linalg.norm(k1), 46.253491700647963, 9)\n\n def test_ghost_atm_meta_lowdin(self):\n mol = gto.Mole()\n mol.atom = [[\"O\" , (0. , 0. , 0.)],\n ['ghost' , (0. , -0.757, 0.587)],\n [1 , (0. 
, 0.757 , 0.587)] ]\n mol.verbose = 0\n mol.spin = 1\n mol.symmetry = True\n mol.basis = {'O':'ccpvdz', 'H':'ccpvdz',\n 'GHOST': gto.basis.load('ccpvdz','H')}\n mol.build()\n mf = scf.RHF(mol)\n self.assertAlmostEqual(mf.kernel(), -75.393287998638741, 9)\n\n def test_rhf_get_occ(self):\n mol = gto.M(verbose=7, output='/dev/null').set(nelectron=10)\n mf = scf.hf.RHF(mol)\n energy = numpy.array([-10, -1, 1, -2, 0, -3])\n self.assertTrue(numpy.allclose(mf.get_occ(energy), [2, 2, 0, 2, 2, 2]))\n\n def test_rhf_symm_get_occ(self):\n mf = scf.RHF(n2sym).set(verbose = 0)\n orbsym = numpy.array([0 , 5, 0 , 5 , 6 , 7 , 0 , 2 , 3 , 5 , 0 , 6 , 7 , 0 , 2 , 3 , 5 , 10, 11, 5])\n energy = numpy.array([34, 2, 54, 43, 42, 33, 20, 61, 29, 26, 62, 52, 13, 51, 18, 78, 85, 49, 84, 7])\n mf.irrep_nelec = {'A1g':6, 'A1u':4, 'E1ux':2, 'E1uy':2}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, orbsym=orbsym),\n [2, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 2]))\n mf.irrep_nelec = {'E1ux':2, 'E1uy':2}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, orbsym=orbsym),\n [0, 2, 0, 0, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2]))\n mf.irrep_nelec = {}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, orbsym=orbsym),\n [0, 2, 0, 0, 0, 0, 2, 0, 2, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2]))\n\n def test_rohf_get_occ(self):\n mol = gto.M(verbose=7, output='/dev/null').set(nelectron=8, spin=2)\n mf = scf.rohf.ROHF(mol)\n energy = numpy.array([-10, -1, 1, -2, 0, -3])\n self.assertTrue(numpy.allclose(mf.get_occ(energy), [2, 1, 0, 2, 1, 2]))\n pmol = n2sym.copy()\n pmol.spin = 2\n pmol.symmetry = False\n mf = scf.rohf.ROHF(pmol).set(verbose = 0)\n energy = numpy.array([34, 2, 54, 43, 42, 33, 20, 61, 29, 26, 62, 52, 13, 51, 18, 78, 85, 49, 84, 7])\n self.assertTrue(numpy.allclose(mf.get_occ(energy),\n [0, 2, 0, 0, 0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2]))\n # 0 virtual\n energy = numpy.array([34, 2, 54, 43, 42, 33, 20, 61])\n self.assertTrue(numpy.allclose(mf.get_occ(energy),\n 
[2, 2, 1, 2, 2, 2, 2, 1]))\n # 0 core\n mf.nelec = (14, 0)\n energy = numpy.array([34, 2, 54, 43, 42, 33, 20, 61, 29, 26, 62, 52, 13, 51])\n self.assertTrue(numpy.allclose(mf.get_occ(energy),\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n\n def test_rohf_symm_get_occ(self):\n pmol = n2sym.copy()\n pmol.charge = 0\n pmol.spin = 2\n mf = scf.ROHF(pmol).set(verbose = 0)\n orbsym = numpy.array([0 , 5, 0 , 5 , 6 , 7 , 0 , 2 , 3 , 5 , 0 , 6 , 7 , 0 , 2 , 3 , 5 , 10, 11, 5])\n energy = numpy.array([34, 2, 54, 43, 42, 33, 20, 61, 29, 26, 62, 52, 13, 51, 18, 78, 85, 49, 84, 7])\n mf.irrep_nelec = {'A1g':7, 'A1u':3, 'E1ux':2, 'E1uy':2}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, orbsym=orbsym),\n [2, 2, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 1]))\n mf.irrep_nelec = {'E1ux':2, 'E1uy':2}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, orbsym=orbsym),\n [0, 2, 0, 0, 2, 0, 2, 0, 1, 1, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2]))\n mf.irrep_nelec = {}\n self.assertTrue(numpy.allclose(mf.get_occ(energy, orbsym=orbsym),\n [0, 2, 0, 0, 0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2]))\n\n def test_rohf_symm_dump_flags(self):\n pmol = n2sym.copy()\n pmol.spin = 2\n mf = scf.ROHF(pmol).set(verbose = 0)\n mf.irrep_nelec = {'A1g':6, 'A1u':4, 'E1ux':2, 'E1uy':2}\n self.assertRaises(ValueError, mf.dump_flags)\n\n mf.irrep_nelec = {'A1g':6, 'A1u':10, 'E1ux':2, 'E1uy':2}\n self.assertRaises(ValueError, mf.dump_flags)\n\n def test_dip_moment(self):\n mf = scf.RHF(mol)\n mf.scf()\n dip = mf.dip_moment(unit_symbol='au')\n self.assertTrue(numpy.allclose(dip, [0.00000, 0.00000, 0.80985])) \n\nif __name__ == \"__main__\":\n print(\"Full Tests for rhf\")\n unittest.main()\n\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.arange",
"numpy.asarray",
"numpy.iscomplexobj",
"numpy.empty"
],
[
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.min",
"numpy.asarray",
"numpy.arange",
"numpy.max",
"numpy.empty"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.conj",
"numpy.random.seed",
"numpy.array_equal",
"numpy.einsum",
"numpy.arange",
"numpy.eye",
"numpy.allclose",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.dot",
"numpy.hstack",
"numpy.random.random",
"numpy.random.seed",
"numpy.unique",
"numpy.asarray",
"numpy.empty_like",
"numpy.eye",
"numpy.arange",
"numpy.argmax",
"numpy.diag_indices",
"numpy.where",
"numpy.sum"
],
[
"numpy.hstack",
"numpy.sqrt",
"numpy.einsum",
"numpy.reshape",
"numpy.asarray",
"numpy.eye",
"numpy.shape",
"numpy.zeros"
],
[
"numpy.random.random",
"numpy.allclose",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrzhuzhe/yunru | [
"faa7380a5363f654f1dc8f5d53b077d9f33bff6f"
] | [
"yolov5/models/common.py"
] | [
"# YOLOv5 common modules\n\nimport math\nfrom copy import copy\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torch.cuda import amp\n\nfrom utils.datasets import letterbox\nfrom utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box\nfrom utils.plots import colors, plot_one_box\nfrom utils.torch_utils import time_synchronized\n\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\n\ndef DWConv(c1, c2, k=1, s=1, act=True):\n # Depthwise convolution\n return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Conv, self).__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))\n\n\nclass TransformerLayer(nn.Module):\n # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)\n def __init__(self, c, num_heads):\n super().__init__()\n self.q = nn.Linear(c, c, bias=False)\n self.k = nn.Linear(c, c, bias=False)\n self.v = nn.Linear(c, c, bias=False)\n self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n self.fc1 = nn.Linear(c, c, bias=False)\n self.fc2 = nn.Linear(c, c, bias=False)\n\n def forward(self, x):\n x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n x = self.fc2(self.fc1(x)) + x\n return x\n\n\nclass TransformerBlock(nn.Module):\n # Vision Transformer https://arxiv.org/abs/2010.11929\n 
def __init__(self, c1, c2, num_heads, num_layers):\n super().__init__()\n self.conv = None\n if c1 != c2:\n self.conv = Conv(c1, c2)\n self.linear = nn.Linear(c2, c2) # learnable position embedding\n self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])\n self.c2 = c2\n\n def forward(self, x):\n if self.conv is not None:\n x = self.conv(x)\n b, _, w, h = x.shape\n p = x.flatten(2)\n p = p.unsqueeze(0)\n p = p.transpose(0, 3)\n p = p.squeeze(3)\n e = self.linear(p)\n x = p + e\n\n x = self.tr(x)\n x = x.unsqueeze(3)\n x = x.transpose(0, 3)\n x = x.reshape(b, self.c2, w, h)\n return x\n\n\nclass Bottleneck(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super(Bottleneck, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super(BottleneckCSP, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass C3(nn.Module):\n # CSP Bottleneck with 3 convolutions\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # 
ch_in, ch_out, number, shortcut, groups, expansion\n super(C3, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\n\nclass C3TR(C3):\n # C3 module with TransformerBlock()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass SPP(nn.Module):\n # Spatial pyramid pooling layer used in YOLOv3-SPP\n def __init__(self, c1, c2, k=(5, 9, 13)):\n super(SPP, self).__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n def forward(self, x):\n x = self.cv1(x)\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass Focus(nn.Module):\n # Focus wh information into c-space\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Focus, self).__init__()\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n # self.contract = Contract(gain=2)\n\n def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))\n # return self.conv(self.contract(x))\n\n\nclass Contract(nn.Module):\n # Contract width-height into channels, i.e. 
x(1,64,80,80) to x(1,256,40,40)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'\n s = self.gain\n x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)\n return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'\n s = self.gain\n x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)\n return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)\n\n\nclass Concat(nn.Module):\n # Concatenate a list of tensors along dimension\n def __init__(self, dimension=1):\n super(Concat, self).__init__()\n self.d = dimension\n\n def forward(self, x):\n return torch.cat(x, self.d)\n\n\nclass NMS(nn.Module):\n # Non-Maximum Suppression (NMS) module\n conf = 0.25 # confidence threshold\n iou = 0.45 # IoU threshold\n classes = None # (optional list) filter by class\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self):\n super(NMS, self).__init__()\n\n def forward(self, x):\n return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)\n\n\nclass AutoShape(nn.Module):\n # input-robust model wrapper for passing cv2/np/PIL/torch inputs. 
Includes preprocessing, inference and NMS\n conf = 0.25 # NMS confidence threshold\n iou = 0.45 # NMS IoU threshold\n classes = None # (optional list) filter by class\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self, model):\n super(AutoShape, self).__init__()\n self.model = model.eval()\n\n def autoshape(self):\n print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()\n return self\n\n @torch.no_grad()\n def forward(self, imgs, size=640, augment=False, profile=False):\n # Inference from various sources. For height=640, width=1280, RGB images example inputs are:\n # filename: imgs = 'data/images/zidane.jpg'\n # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'\n # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)\n # PIL: = Image.open('image.jpg') # HWC x(640,1280,3)\n # numpy: = np.zeros((640,1280,3)) # HWC\n # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)\n # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images\n\n t = [time_synchronized()]\n p = next(self.model.parameters()) # for device and type\n if isinstance(imgs, torch.Tensor): # torch\n with amp.autocast(enabled=p.device.type != 'cpu'):\n return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference\n\n # Pre-process\n n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images\n shape0, shape1, files = [], [], [] # image and inference shapes, filenames\n for i, im in enumerate(imgs):\n f = f'image{i}' # filename\n if isinstance(im, str): # filename or uri\n im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im\n elif isinstance(im, Image.Image): # PIL Image\n im, f = np.asarray(im), getattr(im, 'filename', f) or f\n files.append(Path(f).with_suffix('.jpg').name)\n if im.shape[0] < 5: # image in CHW\n im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)\n im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input\n s = im.shape[:2] # HWC\n shape0.append(s) # image shape\n g = (size / max(s)) # gain\n shape1.append([y * g for y in s])\n imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update\n shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape\n x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad\n x = np.stack(x, 0) if n > 1 else x[0][None] # stack\n x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW\n x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32\n t.append(time_synchronized())\n\n with amp.autocast(enabled=p.device.type != 'cpu'):\n # Inference\n y = self.model(x, augment, profile)[0] # forward\n t.append(time_synchronized())\n\n # Post-process\n y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS\n for i in range(n):\n scale_coords(shape1, y[i][:, :4], shape0[i])\n\n t.append(time_synchronized())\n return Detections(imgs, y, files, t, self.names, x.shape)\n\n\nclass Detections:\n # detections class for YOLOv5 inference results\n def __init__(self, imgs, pred, files, times=None, names=None, shape=None):\n super(Detections, self).__init__()\n d = pred[0].device # device\n gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations\n self.imgs = imgs # list of images as numpy arrays\n self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)\n self.names = names # class names\n self.files = files # image filenames\n self.xyxy = pred # xyxy pixels\n self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels\n self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized\n self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized\n self.n = len(self.pred) # number of images (batch size)\n self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)\n self.s = shape # inference BCHW shape\n\n def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):\n for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):\n str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '\n if pred is not None:\n for c in pred[:, -1].unique():\n n = (pred[:, -1] == c).sum() # detections per class\n str += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \" # add to string\n if show or save or render or crop:\n for *box, conf, cls in pred: # xyxy, confidence, class\n label = f'{self.names[int(cls)]} 
{conf:.2f}'\n if crop:\n save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])\n else: # all others\n plot_one_box(box, im, label=label, color=colors(cls))\n\n im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np\n if pprint:\n print(str.rstrip(', '))\n if show:\n im.show(self.files[i]) # show\n if save:\n f = self.files[i]\n im.save(save_dir / f) # save\n print(f\"{'Saved' * (i == 0)} {f}\", end=',' if i < self.n - 1 else f' to {save_dir}\\n')\n if render:\n self.imgs[i] = np.asarray(im)\n\n def print(self):\n self.display(pprint=True) # print results\n print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)\n\n def show(self):\n self.display(show=True) # show results\n\n def save(self, save_dir='runs/hub/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir\n self.display(save=True, save_dir=save_dir) # save results\n\n def crop(self, save_dir='runs/hub/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir\n self.display(crop=True, save_dir=save_dir) # crop results\n print(f'Saved results to {save_dir}\\n')\n\n def render(self):\n self.display(render=True) # render results\n return self.imgs\n\n def pandas(self):\n # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])\n new = copy(self) # return copy\n ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns\n cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns\n for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):\n a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update\n setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n return new\n\n def tolist(self):\n # return a list of Detections objects, i.e. 
'for result in results.tolist():'\n x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]\n for d in x:\n for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:\n setattr(d, k, getattr(d, k)[0]) # pop out of list\n return x\n\n def __len__(self):\n return self.n\n\n\nclass Classify(nn.Module):\n # Classification head, i.e. x(b,c1,20,20) to x(b,c2)\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups\n super(Classify, self).__init__()\n self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)\n self.flat = nn.Flatten()\n\n def forward(self, x):\n z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list\n return self.flat(self.conv(z)) # flatten to x(b,c2)"
] | [
[
"torch.cat",
"numpy.asarray",
"pandas.DataFrame",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.nn.MultiheadAttention",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"numpy.ascontiguousarray",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.SiLU",
"torch.nn.Flatten",
"numpy.tile",
"torch.nn.MaxPool2d",
"torch.nn.Identity",
"torch.nn.AdaptiveAvgPool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nadavyayon/cell2location | [
"54141fb85d4b0d64825dfdb6d1bf147b025c856b",
"54141fb85d4b0d64825dfdb6d1bf147b025c856b"
] | [
"cell2location/plt/plot_factor_spatial.py",
"cell2location/models/pymc3/simplified/LocationModelHierarchicalW.py"
] | [
"#!pip install plotnine\nimport numpy as np\nimport pandas as pd\nimport plotnine\n\n\ndef plot_factor_spatial(\n adata,\n fact,\n cluster_names,\n fact_ind=[0],\n trans=\"log\",\n sample_name=None,\n samples_col=\"sample\",\n obs_x=\"imagecol\",\n obs_y=\"imagerow\",\n n_columns=6,\n max_col=5000,\n col_breaks=[0.1, 100, 1000, 3000],\n figure_size=(24, 5.7),\n point_size=0.8,\n text_size=9,\n):\n r\"\"\"Plot expression of factors / cell types in space.\n Convenient but not as powerful as scanpy plotting.\n :param adata: anndata object with spatial data\n :param fact: pd.DataFrame with spatial expression of factors (W), e.g. mod.spot_factors_df\n :param cluster_names: names of those factors to show on a plot\n :param fact_ind: index of factors to plot\n :param trans: transform colorscale? passed to plotnine.scale_color_cmap\n :param sample_name: if anndata object contains multiple samples specify which sample to plot (no warning given if not)\n :param samples_col: if anndata object contains multiple which .obs columns specifies sample?\n :param obs_x: which .obs columns specifies x coordinate?\n :param obs_y: which .obs columns specifies y coordinate?\n :param n_columns: how many factors / clusters to plot in each row (plotnine.facet_grid)\n :param max_col: colorscale maximum expression in fact\n :param col_breaks: colorscale breaks\n :param figure_size: figures size works weirdly (only x axis has an effect, use 24 for 6-column plot, 12 for 3, 8 for 2 ...).\n :param point_size: point size of spots\n :param text_size: text size\n \"\"\"\n\n if sample_name is not None:\n sample_ind = np.isin(adata.obs[samples_col], sample_name)\n else:\n sample_ind = np.repeat(True, adata.shape[0])\n\n # adata.obsm['X_spatial'][:,0] vs adata.obs['imagecol'] & adata.obs['imagerow']\n\n for_plot = np.concatenate(\n (\n adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),\n -adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),\n fact.iloc[:, 
fact_ind[0]].values.reshape((adata.obs.shape[0], 1)),\n np.array([cluster_names[fact_ind[0]] for j in range(adata.obs.shape[0])]).reshape((adata.obs.shape[0], 1)),\n ),\n 1,\n )\n for_plot = pd.DataFrame(for_plot, index=adata.obs.index, columns=[\"imagecol\", \"imagerow\", \"weights\", \"cluster\"])\n # select only correct sample\n for_plot = for_plot.loc[sample_ind, :]\n\n for i in fact_ind[1:]:\n for_plot1 = np.concatenate(\n (\n adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),\n -adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),\n fact.iloc[:, i].values.reshape((adata.obs.shape[0], 1)),\n np.array([cluster_names[i] for j in range(adata.obs.shape[0])]).reshape((adata.obs.shape[0], 1)),\n ),\n 1,\n )\n for_plot1 = pd.DataFrame(\n for_plot1, index=adata.obs.index, columns=[\"imagecol\", \"imagerow\", \"weights\", \"cluster\"]\n )\n # select only correct sample\n for_plot1 = for_plot1.loc[sample_ind, :]\n for_plot = pd.concat((for_plot, for_plot1))\n\n for_plot[\"imagecol\"] = pd.to_numeric(for_plot[\"imagecol\"])\n for_plot[\"imagerow\"] = pd.to_numeric(for_plot[\"imagerow\"])\n for_plot[\"weights\"] = pd.to_numeric(for_plot[\"weights\"])\n for_plot[\"cluster\"] = pd.Categorical(for_plot[\"cluster\"], categories=cluster_names[fact_ind], ordered=True)\n\n # print(np.log(np.max(for_plot['weights'])))\n ax = (\n plotnine.ggplot(for_plot, plotnine.aes(\"imagecol\", \"imagerow\", color=\"weights\"))\n + plotnine.geom_point(size=point_size)\n + plotnine.scale_color_cmap(\"magma\", trans=trans, limits=[0.1, max_col], breaks=col_breaks + [max_col])\n + plotnine.coord_fixed()\n + plotnine.theme_bw()\n + plotnine.theme(\n panel_background=plotnine.element_rect(fill=\"black\", colour=\"black\", size=0, linetype=\"solid\"),\n panel_grid_major=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n panel_grid_minor=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n strip_text=plotnine.element_text(size=text_size),\n )\n + 
plotnine.facet_wrap(\"~cluster\", ncol=n_columns)\n + plotnine.ggtitle(\"nUMI from each cell type\")\n + plotnine.theme(figure_size=figure_size)\n )\n\n return ax\n\n\ndef plot_categ_spatial(mod, adata, sample_col, color, n_columns=2, figure_size=(24, 5.7), point_size=0.8, text_size=9):\n\n for_plot = adata.obs[[\"imagecol\", \"imagerow\", sample_col]]\n for_plot[\"color\"] = color\n\n # fix types\n for_plot[\"color\"] = pd.Categorical(for_plot[\"color\"], ordered=True)\n # for_plot['color'] = pd.to_numeric(for_plot['color'])\n for_plot[\"sample\"] = pd.Categorical(for_plot[sample_col], ordered=False)\n for_plot[\"imagecol\"] = pd.to_numeric(for_plot[\"imagecol\"])\n for_plot[\"imagerow\"] = -pd.to_numeric(for_plot[\"imagerow\"])\n\n ax = (\n plotnine.ggplot(for_plot, plotnine.aes(x=\"imagecol\", y=\"imagerow\", color=\"color\"))\n + plotnine.geom_point(size=point_size) # + plotnine.scale_color_cmap()\n + plotnine.coord_fixed()\n + plotnine.theme_bw()\n + plotnine.theme(\n panel_background=plotnine.element_rect(fill=\"black\", colour=\"black\", size=0, linetype=\"solid\"),\n panel_grid_major=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n panel_grid_minor=plotnine.element_line(size=0, linetype=\"solid\", colour=\"black\"),\n strip_text=plotnine.element_text(size=text_size),\n )\n + plotnine.facet_wrap(\"~sample\", ncol=n_columns)\n + plotnine.theme(figure_size=figure_size)\n )\n\n return ax\n",
"# -*- coding: utf-8 -*-\nr\"\"\"Location model decomposes the expression of genes across locations into a set of reference regulatory programmes,\n it is identical to LocationModelLinearDependentW but does not account for correlation of programs\n across locations with similar cell composition, thus has reduced accuracy.\"\"\"\n\nimport numpy as np\nimport pymc3 as pm\nimport theano.tensor as tt\n\nfrom cell2location.models.base.pymc3_loc_model import Pymc3LocModel\n\n\n# defining the model itself\nclass LocationModelHierarchicalW(Pymc3LocModel):\n r\"\"\"Provided here as a 'base' model.\n\n Parameters\n ----------\n cell_state_mat :\n Pandas data frame with gene programmes - genes in rows, cell types / factors in columns\n X_data :\n Numpy array of gene expression (cols) in spatial locations (rows)\n n_iter :\n number of training iterations\n learning_rate, data_type, total_grad_norm_constraint, ...:\n See parent class BaseModel for details.\n gene_level_prior :\n see the description for CoLocationModelNB4V2\n gene_level_var_prior :\n see the description for CoLocationModelNB4V2\n cell_number_prior :\n see the description for CoLocationModelNB4V2, this model does not have **combs_per_spot**\n parameter.\n cell_number_var_prior :\n see the description for CoLocationModelNB4V2, this model does not have\n **combs_mean_var_ratio** parameter.\n phi_hyp_prior :\n see the description for CoLocationModelNB4V2\n\n Returns\n -------\n\n \"\"\"\n # LocationModelNB4V7_V4_V4\n def __init__(\n self,\n cell_state_mat: np.ndarray,\n X_data: np.ndarray,\n data_type: str = \"float32\",\n n_iter=20000,\n learning_rate=0.005,\n total_grad_norm_constraint=200,\n verbose=True,\n var_names=None,\n var_names_read=None,\n obs_names=None,\n fact_names=None,\n sample_id=None,\n gene_level_prior={\"mean\": 1 / 2, \"sd\": 1 / 4},\n gene_level_var_prior={\"mean_var_ratio\": 1},\n cell_number_prior={\"cells_per_spot\": 8, \"factors_per_spot\": 7},\n 
cell_number_var_prior={\"cells_mean_var_ratio\": 1, \"factors_mean_var_ratio\": 1},\n phi_hyp_prior={\"mean\": 3, \"sd\": 1},\n ):\n ############# Initialise parameters ################\n super().__init__(\n cell_state_mat,\n X_data,\n data_type,\n n_iter,\n learning_rate,\n total_grad_norm_constraint,\n verbose,\n var_names,\n var_names_read,\n obs_names,\n fact_names,\n sample_id,\n )\n\n for k in gene_level_var_prior.keys():\n gene_level_prior[k] = gene_level_var_prior[k]\n self.gene_level_prior = gene_level_prior\n\n for k in cell_number_var_prior.keys():\n cell_number_prior[k] = cell_number_var_prior[k]\n self.cell_number_prior = cell_number_prior\n\n self.phi_hyp_prior = phi_hyp_prior\n\n ############# Define the model ################\n self.model = pm.Model()\n\n with self.model:\n # =====================Gene expression level scaling======================= #\n # Explains difference in expression between genes and\n # how it differs in single cell and spatial technology\n # compute hyperparameters from mean and sd\n shape = gene_level_prior[\"mean\"] ** 2 / gene_level_prior[\"sd\"] ** 2\n rate = gene_level_prior[\"mean\"] / gene_level_prior[\"sd\"] ** 2\n shape_var = shape / gene_level_prior[\"mean_var_ratio\"]\n rate_var = rate / gene_level_prior[\"mean_var_ratio\"]\n self.gene_level_alpha_hyp = pm.Gamma(\n \"gene_level_alpha_hyp\", mu=shape, sigma=np.sqrt(shape_var), shape=(1, 1)\n )\n self.gene_level_beta_hyp = pm.Gamma(\"gene_level_beta_hyp\", mu=rate, sigma=np.sqrt(rate_var), shape=(1, 1))\n\n self.gene_level = pm.Gamma(\n \"gene_level\", self.gene_level_alpha_hyp, self.gene_level_beta_hyp, shape=(self.n_var, 1)\n )\n\n # scale cell state factors by gene_level\n self.gene_factors = pm.Deterministic(\"gene_factors\", self.cell_state)\n # tt.printing.Print('gene_factors sum')(gene_factors.sum(0).shape)\n # tt.printing.Print('gene_factors sum')(gene_factors.sum(0))\n\n # =====================Spot factors======================= #\n # prior on spot factors 
reflects the number of cells, fraction of their cytoplasm captured,\n # times heterogeniety in the total number of mRNA between individual cells with each cell type\n self.cells_per_spot = pm.Gamma(\n \"cells_per_spot\",\n mu=cell_number_prior[\"cells_per_spot\"],\n sigma=np.sqrt(cell_number_prior[\"cells_per_spot\"] / cell_number_prior[\"cells_mean_var_ratio\"]),\n shape=(self.n_obs, 1),\n )\n self.factors_per_spot = pm.Gamma(\n \"factors_per_spot\",\n mu=cell_number_prior[\"factors_per_spot\"],\n sigma=np.sqrt(cell_number_prior[\"factors_per_spot\"] / cell_number_prior[\"factors_mean_var_ratio\"]),\n shape=(self.n_obs, 1),\n )\n\n shape = self.factors_per_spot / np.array(self.n_fact).reshape((1, 1))\n rate = tt.ones((1, 1)) / self.cells_per_spot * self.factors_per_spot\n self.spot_factors = pm.Gamma(\"spot_factors\", alpha=shape, beta=rate, shape=(self.n_obs, self.n_fact))\n\n # =====================Spot-specific additive component======================= #\n # molecule contribution that cannot be explained by cell state signatures\n # these counts are distributed between all genes not just expressed genes\n self.spot_add_hyp = pm.Gamma(\"spot_add_hyp\", 1, 0.1, shape=2)\n self.spot_add = pm.Gamma(\"spot_add\", self.spot_add_hyp[0], self.spot_add_hyp[1], shape=(self.n_obs, 1))\n\n # =====================Gene-specific additive component ======================= #\n # per gene molecule contribution that cannot be explained by cell state signatures\n # these counts are distributed equally between all spots (e.g. 
background, free-floating RNA)\n self.gene_add_hyp = pm.Gamma(\"gene_add_hyp\", 1, 1, shape=2)\n self.gene_add = pm.Gamma(\"gene_add\", self.gene_add_hyp[0], self.gene_add_hyp[1], shape=(self.n_var, 1))\n\n # =====================Gene-specific overdispersion ======================= #\n self.phi_hyp = pm.Gamma(\"phi_hyp\", mu=phi_hyp_prior[\"mean\"], sigma=phi_hyp_prior[\"sd\"], shape=(1, 1))\n self.gene_E = pm.Exponential(\"gene_E\", self.phi_hyp, shape=(self.n_var, 1))\n\n # =====================Expected expression ======================= #\n # expected expression\n self.mu_biol = (\n pm.math.dot(self.spot_factors, self.gene_factors.T) * self.gene_level.T\n + self.gene_add.T\n + self.spot_add\n )\n # tt.printing.Print('mu_biol')(self.mu_biol.shape)\n\n # =====================DATA likelihood ======================= #\n # Likelihood (sampling distribution) of observations & add overdispersion via NegativeBinomial / Poisson\n self.data_target = pm.NegativeBinomial(\n \"data_target\",\n mu=self.mu_biol,\n alpha=1 / (self.gene_E.T * self.gene_E.T),\n observed=self.x_data,\n total_size=self.X_data.shape,\n )\n\n # =====================Compute nUMI from each factor in spots ======================= #\n self.nUMI_factors = pm.Deterministic(\n \"nUMI_factors\", (self.spot_factors * (self.gene_factors * self.gene_level).sum(0))\n )\n\n def compute_expected(self):\n r\"\"\"Compute expected expression of each gene in each spot (Poisson mu). Useful for evaluating how well\n the model learned expression pattern of all genes in the data.\n\n \"\"\"\n\n # compute the poisson rate\n self.mu = (\n np.dot(\n self.samples[\"post_sample_means\"][\"spot_factors\"], self.samples[\"post_sample_means\"][\"gene_factors\"].T\n )\n * self.samples[\"post_sample_means\"][\"gene_level\"].T\n + self.samples[\"post_sample_means\"][\"gene_add\"].T\n + self.samples[\"post_sample_means\"][\"spot_add\"]\n )\n"
] | [
[
"pandas.concat",
"pandas.Categorical",
"pandas.DataFrame",
"numpy.repeat",
"pandas.to_numeric",
"numpy.isin"
],
[
"numpy.dot",
"numpy.array",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NCAR/GeoCAT-examples | [
"5ed9a1d68b69a921d0f1fee1160e109853926ed9",
"5ed9a1d68b69a921d0f1fee1160e109853926ed9",
"5ed9a1d68b69a921d0f1fee1160e109853926ed9"
] | [
"Plots/Scatter/NCL_scatter_5.py",
"Plots/Panels/NCL_panel_13.py",
"Plots/Shapefiles/NCL_shapefiles_1.py"
] | [
"\"\"\"\nNCL_scatter_5.py\n================\nThis script illustrates the following concepts:\n - Drawing a scatter plot with markers of different colors\n - Generating dummy data using \"random.normal\"\n - Manually creating a legend using markers and text\n - Customizing the label locations in a legend\n - Changing the orientation of a legend\n - Drawing a legend outside an XY plot\n - Changing the markers in an XY plot\n - Changing the marker color in an XY plot\n - Changing the marker size in an XY plot\n\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/scatter_5.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/scatter_5_lg.png\n\"\"\"\n\n##############################################################################\n# Import packages:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cycler import cycler\n\nfrom geocat.viz import util as gvutil\n\n##############################################################################\n# Generate random data from a normal (Gaussian) distribution with a mean of 10 and standard deviation of 3\nnpts = 300\nrandom = np.random.default_rng(seed=1)\ndata = random.normal(loc=10, scale=3, size=npts)\n\n##############################################################################\n# Specify colors and markers\ncolors = [\n 'darkgoldenrod', 'darkgreen', 'coral', 'cyan', 'firebrick', 'darkslateblue',\n 'limegreen', 'goldenrod'\n]\nmarkers = ['+', '*', 'o', 'x', 's', '^', 'v', 'D']\n\n# This line cycles which color is used to plot the markers\nplt.rcParams['axes.prop_cycle'] = cycler(color=colors)\n\n##############################################################################\n# Plot\nfig = plt.figure(figsize=(8, 8))\n# Adjust the axes size to accommodate the legend at the bottom\nax = plt.axes([0.15, 0.2, 0.75, 0.70])\n\n# Divide data into 8 bins and plot\nnumBins = 8\nindices = np.arange(0, 300)\npartitions = 
np.linspace(0, 20, numBins + 1)\nlabel = \"{start:g}:{end:g}\"\nfor x in range(0, numBins):\n bins = np.where(data > partitions[x], data, np.nan)\n with np.errstate(\n invalid='ignore'\n ): # Indeed not needed, just to get rid of warnings about numpy's NaN comparisons\n bins = np.where(bins < partitions[x + 1], bins, np.nan)\n indices = np.where(bins != np.nan, indices, np.nan)\n plt.plot(indices,\n bins,\n marker=markers[x],\n fillstyle='none',\n linewidth=0,\n label=label.format(start=partitions[x], end=partitions[x + 1]))\n\n# `ncol` being equal to the number of labels makes it appear horizontal\nlegend = ax.legend(bbox_to_anchor=(-0.075, -0.2),\n ncol=numBins,\n loc='lower left',\n columnspacing=0.5,\n frameon=False)\nfor txt in legend.get_texts():\n txt.set_ha(\"center\") # horizontal alignment of text item\n txt.set_va(\"center\") # vertical alignment of text item\n # Move label text so it is centered under the marker\n txt.set_x(-25) # x-position\n txt.set_y(-20) # y-position\n\n# Use geocat.viz.util convenience function to set axes parameters\ngvutil.set_axes_limits_and_ticks(ax,\n xlim=(0, 300),\n ylim=(0, 21),\n xticks=range(0, 301, 50),\n yticks=range(0, 22, 3))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax,\n x_minor_per_major=5,\n y_minor_per_major=3,\n labelsize=14)\n\n# Use geocat.viz.util convenience function to set titles and labels\ngvutil.set_titles_and_labels(ax, maintitle=\"Scatter plot with grouped markers\")\n\nplt.show()\n",
"\"\"\"\nNCL_panel_13.py\n===============\nThis script illustrates the following concepts:\n - Overlaying a vector field over filled contours\n - Paneling two plots vertically\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: http://www.ncl.ucar.edu/Applications/Scripts/panel_13.ncl\n - Original NCL plot: http://www.ncl.ucar.edu/Applications/Images/panel_13_lg.png\n\nNote:\n Due to differences in how NCL and Python scale glyphs in vector fields, the\n smallest vectors in the Python version are much harder to read than in the\n NCL version. An issue has been opened on the geoCAT examples gallery github\n so this can be addressed at a later date.\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport cartopy.crs as ccrs\nfrom cartopy.mpl.gridliner import LongitudeFormatter, LatitudeFormatter\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport xarray as xr\n\nimport geocat.datafiles as gdf\nfrom geocat.viz import cmaps as gvcmaps\nimport geocat.viz.util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/uv300.nc\"))\n\n# Extract data from second timestep\nds = ds.isel(time=1).drop_vars('time')\n\n# Ensure longitudes range from 0 to 360 degrees\nU = gvutil.xr_add_cyclic_longitudes(ds.U, \"lon\")\nV = gvutil.xr_add_cyclic_longitudes(ds.V, \"lon\")\n\n# Thin data to only include every fourth value\nU = U[::4, ::4]\nV = V[::4, ::4]\n\n# Calculate the magnitude of the winds\nmagnitude = np.sqrt(U.data**2 + V.data**2)\n\n###############################################################################\n# Plot:\n\n# Create subplots and specify their projections\nprojection = 
ccrs.PlateCarree()\nfig, axs = plt.subplots(2,\n 1,\n figsize=(7, 10),\n subplot_kw={\"projection\": projection})\nplt.tight_layout(pad=4, h_pad=7)\n\n# Add coastlines, the zorder keyword specifies the order in which the elements\n# are drawn where elements with lower zorder values are drawn first\naxs[0].coastlines(linewidth=0.5, zorder=1)\naxs[1].coastlines(linewidth=0.5, zorder=1)\n\n# Use geocat.viz.util convenience function to set axes tick values\ngvutil.set_axes_limits_and_ticks(axs[0],\n xlim=[-180, 180],\n ylim=[-90, 90],\n xticks=np.arange(-180, 181, 30),\n yticks=np.arange(-90, 91, 30))\ngvutil.set_axes_limits_and_ticks(axs[1],\n xlim=[-180, 180],\n ylim=[-90, 90],\n xticks=np.arange(-180, 181, 30),\n yticks=np.arange(-90, 91, 30))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(axs[0])\ngvutil.add_major_minor_ticks(axs[1])\n\n# Use geocat.viz.util convenience function to make plots look like NCL plots by\n# using latitude, longitude tick labels\ngvutil.add_lat_lon_ticklabels(axs[0])\ngvutil.add_lat_lon_ticklabels(axs[1])\n# Remove the degree symbol from tick labels\naxs[0].yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\naxs[0].xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\naxs[1].yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\naxs[1].xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n# Use geocat.viz.util convenience function to set titles and labels\ngvutil.set_titles_and_labels(axs[0],\n lefttitle='Speed',\n lefttitlefontsize=10,\n righttitle=U.units,\n righttitlefontsize=10)\ngvutil.set_titles_and_labels(axs[1],\n lefttitle='Wind',\n lefttitlefontsize=10,\n righttitle=U.units,\n righttitlefontsize=10)\n\n# Load in colormap\nnewcmap = gvcmaps.gui_default\n\n# Specify contour levels and contour ticks\nspeed_levels = np.arange(0, 40, 2.5)\nspeed_ticks = np.arange(2.5, 37.5, 2.5)\nwind_levels = np.arange(-16, 44, 4)\nwind_ticks 
= np.arange(-12, 40, 4)\n\n# Plot filled contours\nspeed = axs[0].contourf(U['lon'],\n U['lat'],\n magnitude,\n levels=speed_levels,\n cmap=newcmap,\n zorder=0)\nwind = axs[1].contourf(U['lon'],\n U['lat'],\n U.data,\n levels=wind_levels,\n cmap=newcmap,\n zorder=0)\n\n# Create color bars\nspeed_cbar = plt.colorbar(speed,\n ax=axs[0],\n orientation='horizontal',\n ticks=speed_ticks,\n shrink=0.8,\n drawedges=True,\n pad=0.1)\nplt.colorbar(wind,\n ax=axs[1],\n orientation='horizontal',\n ticks=wind_ticks,\n shrink=0.8,\n drawedges=True,\n pad=0.1)\n# Remove trailing zeros from speed color bar tick labels\nspeed_cbar.ax.xaxis.set_major_formatter(FormatStrFormatter('%g'))\n\n# Plotting vector field\nquiver_speed = axs[0].quiver(U['lon'],\n U['lat'],\n U.data,\n V.data,\n scale=400,\n width=0.002,\n headwidth=6,\n headlength=7,\n zorder=2)\nquiver_wind = axs[1].quiver(U['lon'],\n U['lat'],\n U.data,\n V.data,\n scale=400,\n width=0.002,\n headwidth=6,\n headlength=7,\n zorder=2)\n\n# Add white box to go behind reference vector\naxs[0].add_patch(\n mpatches.Rectangle(xy=[0.775, 0],\n width=0.225,\n height=0.2,\n facecolor='white',\n transform=axs[0].transAxes,\n zorder=2))\naxs[1].add_patch(\n mpatches.Rectangle(xy=[0.775, 0],\n width=0.225,\n height=0.2,\n facecolor='white',\n transform=axs[1].transAxes,\n zorder=2))\n# Add reference vector and label\naxs[0].quiverkey(quiver_speed, 0.8875, 0.1, 20, 20, zorder=2)\naxs[1].quiverkey(quiver_wind, 0.8875, 0.1, 20, 20, zorder=2)\naxs[0].text(0.785,\n 0.025,\n \"Reference Vector\",\n transform=axs[0].transAxes,\n zorder=2)\naxs[1].text(0.785,\n 0.025,\n \"Reference Vector\",\n transform=axs[1].transAxes,\n zorder=2)\n\nplt.show()\n",
"\"\"\"\nNCL_shapefiles_1.py\n===================\nThis script illustrates the following concepts:\n - Reading shapefiles\n - Plotting data from shapefiles\n - Using shapefile data to plot unemployment percentages in the U.S.\n - Drawing a custom colorbar on a map\n - Drawing filled polygons over a Lambert Conformal plot\n - Drawing the US with a Lambert Conformal projection\n - Zooming in on a particular area on a Lambert Conformal map\n - Centering the labels under the colorbar boxes\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/shapefiles_1.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/shapefiles_1_lg.png\n\nNote:\n At the time of making this example, there isn't a good way to draw tick\n marks along with the latitude and longitude labels. We have chosen to draw\n gridlines to show exactly where the labels are pointing. The gridlines can\n be removed by calling ``gl.xlines = False`` and ``gl.ylines = False``\n after drawing the labels.\n\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.ticker as mticker\nimport shapefile as shp\nimport numpy as np\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n\nimport geocat.datafiles as gdf\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open all shapefiles and associated .dbf, .shp, and .prj files\nopen(gdf.get(\"shape_files/states.dbf\"), 'r')\nopen(gdf.get(\"shape_files/states.shp\"), 'r')\nopen(gdf.get(\"shape_files/states.shx\"), 'r')\nopen(gdf.get(\"shape_files/states.prj\"), 'r')\n\n# Open shapefiles\nshapefile = 
shp.Reader(gdf.get(\"shape_files/states.dbf\"))\n\n###############################################################################\n# Set color map colors and bounds\ncolormap = colors.ListedColormap(['blue', 'lime', 'yellow', 'red'])\n\ncolorbounds = [0.5, 1.5, 2.5, 3.5, 4.5]\n\nnorm = colors.BoundaryNorm(colorbounds, colormap.N)\n\n###############################################################################\n# Helper function to determine state color:\n\n\ndef color_assignment(record):\n population = record.PERSONS\n unempolyment = record.UNEMPLOY\n percent = unempolyment / population\n if (0.01 <= percent and percent < 0.02):\n return colormap.colors[0]\n elif (0.02 <= percent and percent < 0.03):\n return colormap.colors[1]\n elif (0.03 <= percent and percent < 0.04):\n return colormap.colors[2]\n elif (0.04 <= percent):\n return colormap.colors[3]\n\n\n###############################################################################\n# Plot:\nplt.figure(figsize=(10, 8))\nax = plt.axes(projection=ccrs.LambertConformal(standard_parallels=(33, 45),\n central_longitude=-98))\nax.set_extent([-125, -74, 22, 50])\n\nax.add_feature(cfeature.LAND, color='silver', zorder=0)\nax.add_feature(cfeature.LAKES, color='white', zorder=1)\n\nfor i in range(0, len(shapefile.shapes())):\n shape = shapefile.shape(i)\n record = shapefile.record(i)\n color = color_assignment(record)\n # if a shape has multiple parts make each one a separate patch\n if len(shape.parts) > 1:\n for j in range(0, len(shape.parts)):\n start_index = shape.parts[j]\n # the last part uses the remaining points and doesn't require and end_index\n if (j is (len(shape.parts) - 1)):\n patch = mpatches.Polygon(shape.points[start_index:],\n facecolor=color,\n edgecolor='black',\n linewidth=0.5,\n transform=ccrs.PlateCarree(),\n zorder=2)\n else:\n end_index = shape.parts[j + 1]\n patch = mpatches.Polygon(shape.points[start_index:end_index],\n facecolor=color,\n edgecolor='black',\n linewidth=0.5,\n 
transform=ccrs.PlateCarree(),\n zorder=2)\n ax.add_patch(patch)\n else:\n patch = mpatches.Polygon(shape.points,\n facecolor=color,\n edgecolor='black',\n linewidth=0.5,\n transform=ccrs.PlateCarree(),\n zorder=2)\n ax.add_patch(patch)\n\n# Create colorbar\nplt.colorbar(cm.ScalarMappable(cmap=colormap, norm=norm),\n ax=ax,\n boundaries=colorbounds,\n orientation='horizontal',\n shrink=0.75,\n ticks=[1, 2, 3, 4],\n label='percent',\n aspect=30,\n pad=0.075)\n\n# Add latitude and longitude labels\ngl = ax.gridlines(draw_labels=True, x_inline=False, y_inline=False)\ngl.xlocator = mticker.FixedLocator(np.linspace(-120, -80, 5))\ngl.ylocator = mticker.FixedLocator(np.linspace(25, 45, 5))\ngl.xlabel_style = {'rotation': 0}\ngl.ylabel_style = {'rotation': 0}\n\n# Use geocat.viz.util convenience function to set titles and labels\ngvutil.set_titles_and_labels(ax, maintitle='Percentage unemployment, by state')\n\nplt.show()\n"
] | [
[
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.axes",
"numpy.errstate",
"matplotlib.pyplot.show",
"numpy.where",
"numpy.random.default_rng",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.show"
],
[
"matplotlib.colors.BoundaryNorm",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Nick-AhSen/iGibson | [
"c6854f11eec5d935fa3ef3d6d4852c6571beab4b"
] | [
"igibson/examples/demo/generate_data_semseg_lidar.py"
] | [
"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport igibson\nfrom igibson.envs.igibson_env import iGibsonEnv\n\n\ndef get_lidar_sampling_pattern():\n lidar_vertical_low = -15 / 180.0 * np.pi\n lidar_vertical_high = 15 / 180.0 * np.pi\n lidar_vertical_n_beams = 16\n lidar_vertical_beams = np.arange(\n lidar_vertical_low,\n lidar_vertical_high + (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),\n (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),\n )\n\n lidar_horizontal_low = -45 / 180.0 * np.pi\n lidar_horizontal_high = 45 / 180.0 * np.pi\n lidar_horizontal_n_beams = 468\n lidar_horizontal_beams = np.arange(\n lidar_horizontal_low,\n lidar_horizontal_high,\n (lidar_horizontal_high - lidar_horizontal_low) / (lidar_horizontal_n_beams),\n )\n\n xx, yy = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams)\n xx = xx.flatten()\n yy = yy.flatten()\n\n height = 128\n\n x_samples = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(np.int)\n y_samples = (np.tan(yy) * height // 2 + height // 2).astype(np.int)\n\n x_samples = x_samples.flatten()\n y_samples = y_samples.flatten()\n return x_samples, y_samples\n\n\nx_samples, y_samples = get_lidar_sampling_pattern()\n\n\ndef generate_data_lidar(nav_env, num_samples=3):\n\n rgb_all = []\n lidar_all = []\n lidar_all_2 = []\n label_all = []\n\n point = nav_env.scene.get_random_point()[1]\n\n for _ in range(num_samples):\n new_point = nav_env.scene.get_random_point()[1]\n\n while np.linalg.norm(new_point - point) > 1:\n new_point = nav_env.scene.get_random_point()[1]\n\n delta_pos = new_point - point\n delta_pos = np.array([delta_pos[1], delta_pos[2], delta_pos[0]])\n # print(delta_pos)\n nav_env.robots[0].set_position(new_point)\n pano_rgb = nav_env.simulator.renderer.get_cube(mode=\"rgb\", use_robot_camera=True)\n pano_3d = nav_env.simulator.renderer.get_cube(mode=\"3d\", use_robot_camera=True)\n 
pano_seg = nav_env.simulator.renderer.get_cube(mode=\"seg\", use_robot_camera=True)\n\n r3 = np.array(\n [[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]]\n )\n transformatiom_matrix = np.eye(3)\n\n for i in range(4):\n lidar_all.append(pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] - delta_pos[None, :])\n rgb_all.append(pano_rgb[i][:, :, :3][x_samples, y_samples])\n label_all.append(pano_seg[i][:, :, 0][x_samples, y_samples] * 255.0)\n lidar_all_2.append(\n pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] * 0.9 - delta_pos[None, :]\n )\n transformatiom_matrix = r3.dot(transformatiom_matrix)\n\n lidar_all = np.concatenate(lidar_all, 0).astype(np.float32)\n lidar_all_2 = np.concatenate(lidar_all_2, 0).astype(np.float32)\n rgb_all = np.concatenate(rgb_all, 0).astype(np.float32)\n label_all = np.concatenate(label_all, 0).astype(np.int32)\n\n assert len(label_all) == len(label_all)\n\n direction = lidar_all - lidar_all_2\n direction = direction / (np.linalg.norm(direction, axis=1)[:, None] + 1e-5)\n\n print(lidar_all.shape, direction.shape, rgb_all.shape, label_all.shape)\n return lidar_all, direction, rgb_all, label_all\n\n\ndef generate_data_from_scene(scene_id):\n\n mode = \"headless\"\n config = os.path.join(igibson.example_path, \"configs/fetch_room_rearrangement.yaml\")\n nav_env = iGibsonEnv(\n config_file=config, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0\n )\n # data = []\n # for i in tqdm(range(5)):\n # data.append(generate_data_lidar(nav_env))\n\n # lidar_all = [item[0] for item in data]\n # direction = [item[1] for item in data]\n # rgb_all = [item[2] for item in data]\n # label_all = [item[3] for item in data]\n\n pts, direction, color, label = generate_data_lidar(nav_env)\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3])\n plt.show()\n\n # 
np.savez('/data2/point_cloud/data10_v2_{}.npz'.format(scene_id), lidar=lidar_all, direction=direction, rgb=rgb_all, label=label_all)\n\n\nif __name__ == \"__main__\":\n generate_data_from_scene(\"Rs_int\")\n\n# scenes = []\n# with open('scene_list', 'r') as f:\n# for line in f:\n# scenes.append(line.strip())\n\n# p = Pool(2)\n# p.map(generate_data_from_scene, scenes)\n"
] | [
[
"numpy.meshgrid",
"numpy.arange",
"numpy.eye",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.tan",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Spain-AI/dark_helper | [
"c2a5d774b455b2a374d6ca5e2715f7a560f5fe5b"
] | [
"face_lib/sort.py"
] | [
"\"\"\"\n SORT: A Simple, Online and Realtime Tracker\n Copyright (C) 2016-2020 Alex Bewley [email protected]\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nfrom filterpy.kalman import KalmanFilter\n\ntry:\n from numba import jit\nexcept:\n def jit(func):\n return func\n\nnp.random.seed(0)\n\n\ndef linear_assignment(cost_matrix):\n try:\n import lap\n _, x, y = lap.lapjv(cost_matrix, extend_cost=True)\n return np.array([[y[i],i] for i in x if i >= 0]) #\n except ImportError:\n from scipy.optimize import linear_sum_assignment\n x, y = linear_sum_assignment(cost_matrix)\n return np.array(list(zip(x, y)))\n\n\n@jit\ndef iou(bb_test, bb_gt):\n \"\"\"\n Computes IUO between two bboxes in the form [x1,y1,x2,y2]\n \"\"\"\n xx1 = np.maximum(bb_test[0], bb_gt[0])\n yy1 = np.maximum(bb_test[1], bb_gt[1])\n xx2 = np.minimum(bb_test[2], bb_gt[2])\n yy2 = np.minimum(bb_test[3], bb_gt[3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])\n + (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)\n return(o)\n\n\ndef convert_bbox_to_z(bbox):\n \"\"\"\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n \"\"\"\n w = 
bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w/2.\n y = bbox[1] + h/2.\n s = w * h #scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))\n\n\ndef convert_x_to_bbox(x,score=None):\n \"\"\"\n Takes a bounding box in the centre form [x,y,s,r] and returns it in the form\n [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right\n \"\"\"\n w = np.sqrt(x[2] * x[3])\n h = x[2] / w\n if(score==None):\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))\n else:\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))\n\n\nclass KalmanBoxTracker(object):\n \"\"\"\n This class represents the internal state of individual tracked objects observed as bbox.\n \"\"\"\n count = 0\n def __init__(self,bbox,emb):\n \"\"\"\n Initialises a tracker using initial bounding box.\n \"\"\"\n #define constant velocity model\n self.kf = KalmanFilter(dim_x=7, dim_z=4) \n self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0], [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])\n self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])\n\n self.kf.R[2:,2:] *= 10.\n self.kf.P[4:,4:] *= 1000. 
#give high uncertainty to the unobservable initial velocities\n self.kf.P *= 10.\n self.kf.Q[-1,-1] *= 0.01\n self.kf.Q[4:,4:] *= 0.01\n\n self.kf.x[:4] = convert_bbox_to_z(bbox)\n self.time_since_update = 0\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n self.emb = emb\n\n def update(self, bbox, emb):\n \"\"\"\n Updates the state vector with observed bbox.\n \"\"\"\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n self.kf.update(convert_bbox_to_z(bbox))\n\n self.emb = 0.2 * emb + 0.8 * self.emb\n\n def predict(self):\n \"\"\"\n Advances the state vector and returns the predicted bounding box estimate.\n \"\"\"\n if((self.kf.x[6]+self.kf.x[2])<=0):\n self.kf.x[6] *= 0.0\n self.kf.predict()\n self.age += 1\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n return self.history[-1]\n\n def get_state(self):\n \"\"\"\n Returns the current bounding box estimate.\n \"\"\"\n return convert_x_to_bbox(self.kf.x)\n\n\ndef associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):\n \"\"\"\n Assigns detections to tracked object (both represented as bounding boxes)\n\n Returns 3 lists of matches, unmatched_detections and unmatched_trackers\n \"\"\"\n if(len(trackers)==0):\n return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)\n iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)\n\n for d,det in enumerate(detections):\n for t,trk in enumerate(trackers):\n iou_matrix[d,t] = iou(det, trk)\n\n if min(iou_matrix.shape) > 0:\n a = (iou_matrix > iou_threshold).astype(np.int32)\n if a.sum(1).max() == 1 and a.sum(0).max() == 1:\n matched_indices = np.stack(np.where(a), axis=1)\n else:\n matched_indices = linear_assignment(-iou_matrix)\n else:\n matched_indices = np.empty(shape=(0,2))\n\n unmatched_detections = []\n for d, det in enumerate(detections):\n 
if(d not in matched_indices[:,0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t, trk in enumerate(trackers):\n if(t not in matched_indices[:,1]):\n unmatched_trackers.append(t)\n\n #filter out matched with low IOU\n matches = []\n for m in matched_indices:\n if(iou_matrix[m[0], m[1]]<iou_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1,2))\n if(len(matches)==0):\n matches = np.empty((0,2),dtype=int)\n else:\n matches = np.concatenate(matches,axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\n\nclass Sort(object):\n def __init__(self, max_age=15, min_hits=3):\n \"\"\"\n Sets key parameters for SORT\n \"\"\"\n self.max_age = max_age\n self.min_hits = min_hits\n self.trackers = []\n self.frame_count = 0\n\n def update(self, dets=np.empty((0, 4)), embs=None):\n \"\"\"\n Params:\n dets - a numpy array of detections in the format [[x1,y1,x2,y2],[x1,y1,x2,y2],...]\n Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).\n Returns the a similar array, where the last column is the object ID.\n\n NOTE: The number of objects returned may differ from the number of detections provided.\n \"\"\"\n self.frame_count += 1\n # get predicted locations from existing trackers.\n trks = np.zeros((len(self.trackers), 4))\n to_del = []\n ret = []\n for t, trk in enumerate(trks):\n pos = self.trackers[t].predict()[0]\n trk[:] = [pos[0], pos[1], pos[2], pos[3]]\n if np.any(np.isnan(pos)):\n to_del.append(t)\n trks = np.ma.compress_rows(np.ma.masked_invalid(trks))\n for t in reversed(to_del):\n self.trackers.pop(t)\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)\n\n # update matched trackers with assigned detections\n for m in matched:\n self.trackers[m[1]].update(dets[m[0], :], embs[m[0]])\n\n # create and initialise new trackers for 
unmatched detections\n for i in unmatched_dets:\n trk = KalmanBoxTracker(dets[i,:], embs[i])\n self.trackers.append(trk)\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n d = trk.get_state()[0]\n if (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):\n ret.append((np.concatenate((d, [trk.id + 1])), trk.emb)) # +1 as MOT benchmark requires positive\n i -= 1\n # remove dead tracklet\n if(trk.time_since_update > self.max_age):\n self.trackers.pop(i)\n if(len(ret)>0):\n return ret\n return []"
] | [
[
"numpy.maximum",
"numpy.sqrt",
"numpy.minimum",
"numpy.random.seed",
"numpy.isnan",
"numpy.concatenate",
"numpy.ma.masked_invalid",
"scipy.optimize.linear_sum_assignment",
"numpy.array",
"numpy.where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
ashishgaurav13/cl_safer_classifiers | [
"a3df87a4bc863377485fa58a8a475991a4fc9800",
"a3df87a4bc863377485fa58a8a475991a4fc9800"
] | [
"utils/data_iterators/cifar100.py",
"utils/networks/network_si_pathint.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.backend import clear_session\nfrom keras.utils import to_categorical\nimport tensorflow.keras as keras\nfrom .common import save_pickle, load_pickle\nfrom tqdm import tqdm\n\n# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => flatten\n# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => activation_42\n\nclass CIFAR100_DataIterator:\n\n def __init__(self, train_data, test_data, batch_size = 32, \n randomize = True, task_labels = None,\n embedding_save_file = 'utils/data_iterators/cifar100_embedding.pkl',\n embedding_model_file = 'utils/data_iterators/cifar100_ResNet44v1_model.171.h5',\n embedding_model_layer = 'activation_42'): # 'flatten'):\n\n assert(task_labels != None)\n self.train_x, self.train_y = train_data\n self.n = len(self.train_y)\n print('Training examples = %d' % self.n)\n self.test_x, self.test_y = test_data\n self.tn = len(self.test_y)\n print('Test examples = %d' % self.tn)\n self.i = 0\n self.batch_size = batch_size\n print('Batch size = %d' % self.batch_size)\n self.randomize = randomize\n if randomize:\n idx = np.random.permutation(self.n)\n self.train_x = self.train_x[idx]\n self.train_y = self.train_y[idx]\n print('Shuffled training data')\n self.orig_data = (np.copy(self.train_x), np.copy(self.train_y),\n np.copy(self.test_x), np.copy(self.test_y))\n \n self.embedding_save_file = embedding_save_file\n self.embedding_model_file = embedding_model_file\n self.embedding_model_layer = embedding_model_layer\n self.reshape_dims = (64*8*8,) # (64,)\n self.convert_to_embeddings()\n\n self.n_tasks = len(task_labels)\n self.task_labels = task_labels\n self.n_labels_per_task = len(task_labels[0])\n for t in self.task_labels: assert(len(t) == self.n_labels_per_task)\n self.get_taskwise_data()\n self.switch_task(0)\n\n def img_fn_cifar100(img):\n image = np.zeros((32,32,3), dtype=np.uint8)\n image[...,0] = 
np.reshape(img[:1024], (32,32)) # Red channel\n image[...,1] = np.reshape(img[1024:2048], (32,32)) # Green channel\n image[...,2] = np.reshape(img[2048:], (32,32)) # Blue channel\n return image\n \n self.img_fn = img_fn_cifar100\n \n def iterative_fn(self, fn, dataset, batches = 100):\n ret = []\n n = dataset.shape[0]\n per_batch_size = n // batches\n for i in tqdm(range(batches)):\n if i+1 != batches:\n ret += [fn(dataset[i*per_batch_size:(i+1)*per_batch_size])]\n else:\n ret += [fn(dataset[i*per_batch_size:])]\n ret = np.vstack(ret)\n return ret\n\n def convert_to_embeddings(self):\n if os.path.isfile(self.embedding_save_file):\n print('Embedding file %s exists, skipping embedding generation.'\n % self.embedding_save_file)\n self.etrain_x, self.etest_x = load_pickle(self.embedding_save_file)\n else:\n assert(os.path.isfile(self.embedding_model_file))\n model = load_model(self.embedding_model_file)\n print(\"Loaded model: %s\" % self.embedding_model_file)\n train_x = self.train_x.astype('float32') / 255\n train_x_mean = np.mean(train_x, axis = 0)\n train_x -= train_x_mean\n test_x = self.test_x.astype('float32') / 255\n test_x -= train_x_mean\n results = model.evaluate(test_x, to_categorical(self.test_y))\n print(\"Test acc: %s\" % results)\n intermediate_layer = model.\\\n get_layer(self.embedding_model_layer).output\n embedding_model = keras.Model(\n inputs = model.input, outputs = intermediate_layer)\n assert(len(self.reshape_dims) == 1)\n dim = self.reshape_dims[0]\n fn = lambda x: np.reshape(embedding_model.predict(x), [-1, dim])\n self.etrain_x = self.iterative_fn(fn, train_x)\n self.etest_x = self.iterative_fn(fn, test_x)\n save_pickle([self.etrain_x, self.etest_x],\n savefile = self.embedding_save_file)\n clear_session()\n print('Loaded embeddings.')\n \n # Remap class labels eg. 
33,2,4 => 0, 1, 2\n def remap(self, x, classnums):\n # print(x)\n x = np.squeeze(x)\n # curr_labels = np.unique(x)\n # new_labels = {label: i for i, label in enumerate(curr_labels)}\n new_labels = {label: i for i, label in enumerate(classnums)}\n x_remapped = np.copy(x)\n for i in range(x.shape[0]):\n x_remapped[i] = new_labels[x[i]]\n # print(np.unique(x), np.unique(x_remapped))\n return x_remapped, new_labels\n\n def get_taskwise_data(self):\n self.tasks = {}\n for i in range(self.n_tasks):\n self.tasks[i] = {}\n class_nums = self.task_labels[i]\n tr_indices = np.array([np.where(self.train_y == class_num)[0] for \\\n class_num in class_nums]).flatten()\n test_indices = np.array([np.where(self.test_y == class_num)[0] for \\\n class_num in class_nums]).flatten()\n self.tasks[i]['train_x'] = self.etrain_x[tr_indices]\n self.tasks[i]['img_train_x'] = self.train_x[tr_indices]\n self.tasks[i]['train_y'], tr_labels = self.remap(self.train_y[tr_indices], class_nums)\n self.tasks[i]['n'] = len(tr_indices)\n if self.randomize:\n idx = np.random.permutation(self.tasks[i]['n'])\n self.tasks[i]['train_x'] = self.tasks[i]['train_x'][idx]\n self.tasks[i]['img_train_x'] = self.tasks[i]['img_train_x'][idx]\n self.tasks[i]['train_y'] = self.tasks[i]['train_y'][idx]\n self.tasks[i]['test_x'] = self.etest_x[test_indices]\n self.tasks[i]['img_test_x'] = self.test_x[test_indices]\n self.tasks[i]['test_y'], test_labels = self.remap(self.test_y[test_indices], class_nums)\n self.tasks[i]['tn'] = len(test_indices)\n if self.randomize:\n idx = np.random.permutation(self.tasks[i]['tn'])\n self.tasks[i]['test_x'] = self.tasks[i]['test_x'][idx]\n self.tasks[i]['img_test_x'] = self.tasks[i]['img_test_x'][idx]\n self.tasks[i]['test_y'] = self.tasks[i]['test_y'][idx]\n assert(tr_labels == test_labels)\n\n def switch_task(self, new_task_idx):\n assert(0 <= new_task_idx < self.n_tasks)\n self.curr_idx = new_task_idx\n self.n = self.tasks[self.curr_idx]['n']\n self.tn = 
self.tasks[self.curr_idx]['tn']\n self.train_x = self.tasks[self.curr_idx]['train_x']\n self.img_train_x = self.tasks[self.curr_idx]['img_train_x']\n self.train_y = np.squeeze(self.tasks[self.curr_idx]['train_y'])\n self.test_x = self.tasks[self.curr_idx]['test_x']\n self.img_test_x = self.tasks[self.curr_idx]['img_test_x']\n self.test_y = np.squeeze(self.tasks[self.curr_idx]['test_y'])\n # print('switch to %d: %s' % (new_task_idx, np.unique(self.test_y)))\n \n def inspect(self):\n\n print('inspect')\n\n r, c = self.n_tasks, self.n_labels_per_task\n xw = min(15, c)\n yw = max(1.5*r, 10)\n fig = plt.figure(figsize = (xw, yw))\n subplot_i = 0\n \n for task in range(self.n_tasks):\n self.switch_task(task)\n classes_to_show = np.unique(self.test_y)\n all_indices = [np.where(self.test_y == class_num)[0] for class_num in classes_to_show]\n n_ex = [len(item) for item in all_indices]\n example_indices = [np.random.choice(item) for item in all_indices]\n examples = self.img_test_x[example_indices]\n\n for i, img_idx in enumerate(classes_to_show):\n ax = fig.add_subplot(r, c, subplot_i+1)\n ax.set_xticks(())\n ax.set_yticks(())\n label_human_readable = str(img_idx) # TODO\n img = examples[img_idx]\n ax.set_xlabel(label_human_readable)\n plt.imshow(img, cmap='gray', interpolation='none')\n subplot_i += 1\n\n # plt.tight_layout(True)\n plt.savefig(\"inspect.png\")\n plt.show()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.i+self.batch_size > self.n:\n self.i = 0\n ret_data = self.train_x[self.i:self.i+self.batch_size]\n ret_labels = self.train_y[self.i:self.i+self.batch_size]\n self.i += self.batch_size\n return ret_data, ret_labels\n \n def test(self, samples = 32):\n idx = np.random.choice(self.tn, size = samples, replace = False)\n return self.test_x[idx], self.test_y[idx]",
"import os, sys\nsys.path.extend([os.path.expanduser(\n os.path.abspath('./utils/networks/synaptic_intelligence/')\n)])\nfrom pathint import utils as putils\nimport utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom pathint import protocols\nfrom pathint.optimizers import KOOptimizer\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.callbacks import Callback\nfrom pathint.keras_utils import LossHistory\nimport numpy as np\nfrom keras.backend.tensorflow_backend import set_session\nimport tensorflow as tf\n# from keras_tqdm import TQDMNotebookCallback\n\nclass SINetwork:\n\n def __init__(self, layer_sizes, feature_extractor_needed = False, use_dropout = False,\n activation = 'relu', dropoutv = 0.5, reshape_dims = None, seed = 0, \n session_config = None, it = None, c = 1.0, xi = 0.1, lr = 0.001):\n\n assert(len(layer_sizes) == 4)\n assert(session_config != None)\n assert(it != None)\n self.layer_sizes = layer_sizes\n self.feature_extractor_needed = feature_extractor_needed\n self.use_dropout = use_dropout\n self.dropoutv = dropoutv\n self.reshape_dims = reshape_dims\n self.seed = seed\n self.session_config = session_config\n self.it = it\n\n self.use_dropout = use_dropout\n self.activation = utils.get_activation(activation)\n\n print(\"Using feature extractor: %s\" % self.feature_extractor_needed)\n print(\"Using dropout, bn: %s, %f\" % (self.use_dropout, self.dropoutv))\n\n self.phs = {}\n self.vars = {}\n self.objs = {}\n self.all_predictions = []\n\n self.c = c\n self.xi = xi\n self.lr = float(lr)\n\n def apply_feature_extractor(self, X):\n\n if self.feature_extractor_needed:\n if not hasattr(self, 'feature_extractor_set'):\n with tf.variable_scope(\"feature_extractor\"):\n X, created_layers = utils.vgg16(X, self.training_ph)\n self.feature_extractor_set = True\n self.feature_extractor_layers = created_layers\n else:\n print(\"Reusing feature extractor\")\n with tf.variable_scope(\"feature_extractor\", reuse = 
True):\n X = utils.vgg16_reuse(X, self.training_ph, self.feature_extractor_layers)\n else:\n X = tf.reshape(X, [-1, self.layer_sizes[0]])\n\n return X\n \n def get_trainable_vars(self, scope = \"\", silent = False):\n\n var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = scope)\n if self.feature_extractor_needed:\n new_var_list = []\n for item in var_list:\n if not item.name.startswith(\"feature_extractor\"):\n new_var_list += [item]\n var_list = new_var_list\n if not silent:\n print(\"Trainable vars: %s\" % str(var_list))\n return var_list\n\n def create_session(self, improve_by = 5, min_epoch = 10):\n self.objs['saver'] = tf.train.Saver()\n # self.objs['sess'] = tf.Session(config = self.session_config)\n self.objs['sess'] = tf.InteractiveSession()\n self.objs['sess'].run(tf.global_variables_initializer())\n self.objs['es'] = utils.EarlyStopping(\n self.objs['sess'], \n self.objs['saver'],\n save_dir = \"saved_seed%d\" % self.seed,\n improve_by = improve_by,\n min_epoch = min_epoch\n )\n\n if self.feature_extractor_needed:\n if not os.path.exists(\"vgg16_cifar100\"):\n print(\"Pretrained model doesnt exist for VGG16\")\n print(\"Run cifar100.py first\")\n exit(0)\n else:\n reqd_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = \"feature_extractor\")\n feature_extractor_saver = tf.train.Saver(reqd_variables)\n print(\"Restoring feature extractor variables\")\n feature_extractor_saver.restore(self.objs['sess'], \"vgg16_cifar100/saved.ckpt\")\n print(\"Done\")\n\n # self.objs['sess'].graph.finalize()\n \n def print_vars(self, var_list, show_values = False, extra = False):\n for num, var in enumerate(var_list):\n print_strs = []\n if show_values:\n if 'sess' in self.objs:\n red_sum = self.objs['sess'].run(tf.reduce_sum(var))\n print_strs += [\"mag %f\" % (red_sum)]\n else:\n print_strs += [\"init\"]\n if extra:\n if 'sess' in self.objs:\n nonzerovar = tf.boolean_mask(var, tf.greater(var, 0.000001))\n tmin = 
self.objs['sess'].run(tf.math.reduce_max(nonzerovar))\n print_strs += [\"tmax %f\" % tmin]\n nz = self.objs['sess'].run(tf.math.count_nonzero(var))\n print_strs += [\"nonzero %d\" % nz]\n num_elements = self.objs['sess'].run(tf.reduce_sum(tf.ones_like(var)))\n print_strs += [\"total %d\" % num_elements]\n print_str = \"\\t(%d) %s\" % (num+1, var.name)\n if len(print_strs) > 0:\n print_str += \" => %s\" % \" , \".join(print_strs)\n print(print_str)\n print(\"Number of vars: %d\" % len(var_list))\n\n def setup(self):\n\n set_session(tf.Session(config = self.session_config))\n activation_fn = self.activation\n self.model = Sequential()\n self.model.add(Dense(self.layer_sizes[1], activation=activation_fn, input_dim=self.layer_sizes[0]))\n if self.use_dropout: self.model.add(Dropout(self.dropoutv))\n self.model.add(Dense(self.layer_sizes[2], activation=activation_fn))\n if self.use_dropout: self.model.add(Dropout(self.dropoutv))\n self.model.add(Dense(self.layer_sizes[3], activation='softmax'))\n\n protocol_name, protocol = protocols.PATH_INT_PROTOCOL(omega_decay='sum', xi=self.xi)\n self.opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999)\n opt_name = 'adam'\n\n self.oopt = KOOptimizer(self.opt, model=self.model, **protocol)\n self.model.compile(loss=\"categorical_crossentropy\", optimizer=self.oopt, metrics=['accuracy'])\n\n history = LossHistory()\n # tqdm_callback = TQDMNotebookCallback()\n self.extra_callbacks = [history] #, tqdm_callback]\n\n print(\"Setting cval to %g\" % self.c)\n self.oopt.set_strength(self.c)\n \n def preprocessed_(self, task_num, x):\n\n if not hasattr(self, 'all_data'):\n self.all_data = {}\n self.all_data[task_num] = x\n print('Added preprocessed data for task %d' % task_num)\n\n def train_epoch(self, n_task, epoch, silent = False):\n\n stuffs = self.model.fit(\n self.all_data[n_task]['train_x'], \n self.all_data[n_task]['train_y'],\n batch_size = self.it.batch_size, \n callbacks = self.extra_callbacks,\n epochs = 1, \n verbose = 0, \n 
validation_data = (\n self.all_data[n_task]['test_x'],\n self.all_data[n_task]['test_y'],\n ))\n \n # TODO: dropout\n\n avg_tr_loss = stuffs.history['loss'][0]\n avg_tr_acc = stuffs.history['acc'][0]\n test_acc = stuffs.history['val_acc'][0]\n\n task_accs_all = self.accuracies(n_task+1)\n task_accs = np.average(task_accs_all)\n \n pred_n_tasks = self.predictions(n_task+1)\n self.all_predictions += [pred_n_tasks]\n behs = [self.beh(i) for i in range(n_task)] # Check beh on n_task-1 tasks\n beh_str = \"Beh: \" + str(behs)\n del(self.all_predictions[-1])\n\n print(\"Epoch: %d, Acc: %.2f%%, ValAcc: %.2f%%, Loss: %f\" % (\n epoch+1, avg_tr_acc * 100.0, test_acc * 100.0, avg_tr_loss))\n if not silent:\n print(\"PastValAcc(%d): %s => %.2f%%\" % (n_task+1,\n \" \".join([\"%.2f%%\" % item for item in task_accs_all]), task_accs))\n print(\"%s\" % beh_str)\n\n return task_accs \n\n def accuracies(self, n):\n task_accuracies = []\n for i in range(n):\n test_loss, test_acc = self.model.evaluate(\n self.all_data[i]['test_x'],\n self.all_data[i]['test_y'],\n verbose = 0\n )\n task_accuracies.append(test_acc * 100.0)\n return task_accuracies\n\n def predictions(self, n):\n pred_n_tasks = []\n for i in range(n):\n preds = np.argmax(self.model.predict(\n self.all_data[i]['test_x'],\n verbose = 0\n ), axis = 1).astype('uint8')\n pred_n_tasks.append(preds)\n return pred_n_tasks\n\n def beh_show(self):\n print_str = \"%d rows: \" % len(self.all_predictions)\n print_str += \"%s\" % str([len(item) for item in self.all_predictions])\n print(print_str)\n\n # Behavior across n iterations\n def beh(self, i):\n if len(self.all_predictions) <= 1:\n print(\"Not enough data for behaviour analysis\")\n return\n # self.beh_show()\n # print(\"Access rows %d:%d -> col %d\" % (i, len(self.all_predictions)-1, i))\n chosen_classifications = [item[i] for item in self.all_predictions[i:]] # only consider data from index i+\n n = len(chosen_classifications[0])\n same = 0\n for ii in range(n):\n reqd = 
chosen_classifications[0][ii]\n is_same = True\n for item in chosen_classifications[1:]:\n if item[ii] != reqd:\n is_same = False\n break\n if is_same:\n same += 1\n return round(same*100.0/n, 2)\n\n def final_stats(self, n_task, n_permute_tasks): \n task_accs = self.accuracies(n_permute_tasks)\n pred_n_tasks = self.predictions(n_permute_tasks)\n self.all_predictions += [pred_n_tasks]\n behs = [self.beh(i) for i in range(n_task)] # Check beh on n_task-1 tasks\n print(\"Final beh: \" + str(behs))\n task_accs = [\"%.2f%%\" % item for item in task_accs]\n print(\"Task accuracies: \" + \" \".join(task_accs))\n"
] | [
[
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.imshow",
"numpy.random.choice",
"numpy.reshape",
"numpy.unique",
"numpy.squeeze",
"matplotlib.pyplot.savefig",
"tensorflow.keras.Model",
"numpy.copy",
"numpy.random.permutation",
"numpy.mean",
"tensorflow.keras.backend.clear_session",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.where",
"numpy.vstack",
"matplotlib.pyplot.figure"
],
[
"tensorflow.InteractiveSession",
"tensorflow.math.count_nonzero",
"tensorflow.greater",
"tensorflow.get_collection",
"tensorflow.reduce_sum",
"tensorflow.math.reduce_max",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
pranavgo/gennav | [
"fc57707912c6f1c6af208a30b2ab0ad78c2cc798"
] | [
"gennav/envs/binaryOccupancyGrid2D_env.py"
] | [
"import numpy as np\nfrom gennav.envs.base import Environment\nfrom gennav.utils.common import RobotState\nfrom gennav.utils.geometry import Point\nfrom matplotlib import pyplot as plt\n\n\nclass BinaryOccupancyGrid2DEnv(Environment):\n \"\"\"Base class for a Binary Occupancy Grid 2D envrionment.\n\n Arguments:\n X (unsigned int) : The number of grid cells in the x-direction\n Y (unsigned int) : the number of grid cells in the y-direction\n \"\"\"\n\n def __init__(self, X=10, Y=10):\n super(BinaryOccupancyGrid2DEnv, self).__init__()\n self.X = X\n self.Y = Y\n self.scan = None\n self.robotPose = None\n self.scale = 5\n self.grid = np.zeros((self.X * self.scale, self.Y * self.scale))\n\n # Storing transforms\n self.transforms = {}\n self.mapTbot = {\n \"from\": \"map\",\n \"to\": \"bot\",\n \"transform\": self.scale\n * np.array(\n [[1, 0, int(self.X / 2)], [0, 1, int(self.Y / 2)], [0, 0, 1]]\n ).reshape(3, 3),\n }\n self.botTworld = {\"from\": \"bot\", \"to\": \"world\", \"transform\": np.empty((3, 3))}\n self.mapTworld = {\n \"from\": \"map\",\n \"to\": \"world\",\n \"transform\": np.dot(self.mapTbot[\"transform\"], self.botTworld[\"transform\"]),\n }\n self.transforms[\"mapTbot\"] = self.mapTbot\n self.transforms[\"botTworld\"] = self.botTworld\n self.transforms[\"mapTworld\"] = self.mapTworld\n\n def update(self, scan, robotPose):\n \"\"\"Function to update the environment\n Args:\n scan (list) : List of ang_min, ang_max, ranges\n robotPose (gennav.utils.RobotPose) : Current RobotPose\n \"\"\"\n self.scan = scan\n self.robotPose = robotPose\n self.compute_transforms()\n self.fillOccupancy()\n\n def fillOccupancy(self):\n \"\"\"Function that fill the occupnacy grid on every update\n Assumptions:\n 1. RobotPose is considered (0, 0, 0) to accomodate the laser scan, which produces ranges wrt to the bot\n 2. The RobotPose in the occupancy grid is (X * scale_factor/2, Y * scale_factor /2, 0)\n 3. 
The attribute robotPose is the real pose of the robot wrt to the world Frame,\n thus it helps us to calculate the transform for trajectory and pose validity queries\n \"\"\"\n self.grid[:] = 0\n ang_min, ang_max, ranges = self.scan\n angle_step = (ang_max - ang_min) / len(ranges)\n for i, rng in enumerate(ranges):\n\n # Check for obstacles\n if np.abs(rng) is not np.inf:\n x, y = (\n rng * np.cos(ang_min + i * angle_step),\n rng * np.sin(ang_max + i * angle_step),\n )\n newState = self.transform(\"bot\", \"map\", RobotState(Point(x, y, 0)))\n x_, y_ = newState.position.x, newState.position.y\n\n # Checking if the range is within the grid, to mark them as occupied\n if 0 <= x_ < self.grid.shape[0] and 0 <= y_ < self.grid.shape[1]:\n if self.grid[int(x_)][int(-y_ - 1)] != 1:\n self.grid[int(x_)][int(-y_ - 1)] = 1\n\n def get_status(self, state):\n \"\"\"Get whether a given state is valid within the environment.\n\n Method for checking the validity of a given RobotPose in the environment.\n\n Args:\n state (gennav.utils.RobotState): State to be checked\n\n Returns:\n bool: True if state is valid otherwise False\n \"\"\"\n state = self.transform(\"world\", \"map\", state)\n x, y = state.position.x, state.position.y\n if self.grid[x][-y - 1] == 1:\n return False\n else:\n return True\n\n def get_traj_status(self, traj):\n \"\"\"Get whether a given trajectory is valid within the environment.\n\n Method for checking the validity of a trajectory in the given environment.\n\n Args:\n state (gennav.utils.Trajectory): Trajectory to be checked\n\n Returns:\n bool: True if state is valid otherwise False\n \"\"\"\n collision = False\n for i in range(len(traj.path) - 1):\n collision = self.check_line_segment(\n self.transform(\"world\", \"map\", traj.path[i]),\n self.transform(\"world\", \"map\", traj.path[i + 1]),\n )\n if collision:\n break\n return not collision\n\n def transform(self, frame1, frame2, rsf1):\n \"\"\"Transform robotPose from one pose to the other\n\n Args:\n 
frame1 (string) : from the frame (world, bot, map)\n frame2 (string) : to the frame (world, bot, map)\n rsf1 (gennav.utils.common.RobotState) : RobotState in frame1\n Returns:\n rsf2 (gennav.utils.common.RobotState) : RobotState in frame2\n \"\"\"\n # TODO: Make it more robust in terms of checking frames\n\n # Check if the required trnasform or the inverse of the transform exists\n frame = frame2 + \"T\" + frame1\n frame_inv = frame1 + \"T\" + frame2\n if frame in self.transforms.keys():\n t_matrix = self.transforms[frame][\"transform\"]\n elif frame_inv in self.transforms.keys():\n t_matrix = np.linalg.inv(self.transforms[frame_inv][\"transform\"])\n else:\n raise Exception(\"Transform for the frames not found\")\n\n # Transform using matrix multiplication\n pf2 = np.dot(\n t_matrix, np.array([rsf1.position.x, rsf1.position.y, 1]).reshape(3, 1)\n )\n rsf2 = RobotState(position=Point(pf2[0].item(), pf2[1].item()))\n\n # Return RobotState\n return rsf2\n\n def compute_transforms(self):\n \"\"\"Computes transforms between frames\n\n Uses robot pose to compute transform between the world frame and the bot frame\n \"\"\"\n x, y, yaw = (\n self.robotPose.position.x,\n self.robotPose.position.y,\n self.robotPose.orientation.yaw,\n )\n worldTbot = np.array(\n [[np.cos(yaw), -np.sin(yaw), x], [np.sin(yaw), np.cos(yaw), y], [0, 0, 1]]\n ).reshape(3, 3)\n self.botTworld[\"transform\"] = np.linalg.inv(worldTbot)\n self.mapTworld[\"transform\"] = np.dot(\n self.mapTbot[\"transform\"], self.botTworld[\"transform\"]\n )\n\n def visualise_grid(self):\n \"\"\"\n Helper function to visualise grid\n \"\"\"\n plt.imshow(self.grid, origin=\"bottom\", cmap=\"binary\")\n plt.show()\n\n def check_line_segment(self, state1, state2):\n \"\"\"Checks whether a line segment is collision free in the environent\n\n Computes a line segment from the start point to the end point and\n parametrically checks if the grid cells they occupy are occupied.\n\n Args:\n state1 
(gennav.utils.common.RobotState) : One end point\n state2 (gennav.utils.common.RobotState) : The other end point\n \"\"\"\n point1 = state1.position\n point2 = state2.position\n x1, y1 = point1.x, point1.y\n x2, y2 = point2.x, point2.y\n m = (y2 - y1) / (x2 - x1)\n collision = False\n for x in np.arange(x1, x2, 0.5):\n y = m * x - m * x1 + y1\n if self.grid[int(x)][int(-y - 1)] == 1:\n collision = True\n break\n return collision\n"
] | [
[
"numpy.dot",
"matplotlib.pyplot.imshow",
"numpy.abs",
"numpy.linalg.inv",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elena-kolomeets/lowfat | [
"f7647f5cd12519f722e41808157a96cc3e37b6ce"
] | [
"lowfat/management/commands/load2019applications.py"
] | [
"import pandas as pd\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import BaseUserManager\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError\n\nfrom lowfat.models import Claimant\n\nclass Command(BaseCommand):\n help = \"Import CSV with 2019 applications.\"\n\n def add_arguments(self, parser):\n parser.add_argument('csv', nargs='?', default='2019.csv')\n\n # pylint: disable=too-many-branches,too-many-locals\n def handle(self, *args, **options):\n fail_list = []\n success_list = []\n user_manager = BaseUserManager()\n\n data = pd.read_csv(options['csv'])\n for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable\n\n received_offer = line['Invited'] == 'YES'\n if line[\"Research Classification\"] == \"N/A - I do not do research\":\n jacs = \"Y0\"\n else:\n jacs = line[\"Research Classification\"][1:3]\n\n applicants_dict = {\n \"application_year\": 2018,\n \"fellow\": False,\n \"received_offer\": received_offer,\n \"forenames\": line[\"First name\"],\n \"surname\": line[\"Surname\"],\n \"affiliation\": line[\"Home Institution\"],\n \"department\": line[\"Department\"] if pd.notnull(line[\"Department\"]) else \"\",\n \"group\": line[\"Group within Department\"] if pd.notnull(line[\"Group within Department\"]) else \"\",\n \"career_stage_when_apply\": line[\"Career stage\"][6],\n \"job_title_when_apply\": line[\"Job Title\"],\n \"research_area\": line[\"Area of work\"],\n \"research_area_code\": jacs,\n \"email\": line[\"Email Address\"],\n \"phone\": line[\"Telephone number\"],\n \"gender\": line[\"Gender\"][0] if pd.notnull(line[\"Gender\"]) else 'R',\n \"home_country\": \"GB\",\n \"home_city\": \"Unknow\",\n \"funding\": line[\"Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work\"],\n \"funding_notes\": line[\"Which additional funding body/charity/organisation would you probably turn to if 
seeking financial support for your research/work\"] if pd.notnull(line[\"Which additional funding body/charity/organisation would you probably turn to if seeking financial support for your research/work\"]) else \"\",\n \"claimantship_grant\": 3000 if received_offer else 0,\n \"institutional_website\": line[\"Please specify your Institutional webpage\"] if pd.notnull(line[\"Please specify your Institutional webpage\"]) else \"\",\n \"website\": line[\"Please specify your blog\"] if pd.notnull(line[\"Please specify your blog\"]) else \"\",\n \"orcid\": line[\"Please specify your ORCID\"] if pd.notnull(line[\"Please specify your ORCID\"]) else \"\",\n \"google_scholar\": line[\"Please specify your Google Scholar\"] if pd.notnull(line[\"Please specify your Google Scholar\"]) else \"\",\n \"twitter\": line[\"Please specify your Twitter handle\"] if pd.notnull(line[\"Please specify your Twitter handle\"]) else \"\",\n \"screencast_url\": line[\"Application Screencast URL\"] if pd.notnull(line[\"Application Screencast URL\"]) else \"\",\n \"example_of_writing_url\": line[\"Example of writing\"] if pd.notnull(line[\"Example of writing\"]) else \"\",\n }\n\n try:\n applicant = Claimant(**applicants_dict)\n applicant.save()\n success_list.append(index)\n\n if received_offer:\n new_user = get_user_model().objects.create_user(\n username=applicant.slug,\n email=applicant.email,\n password=user_manager.make_random_password(),\n first_name=line[\"First name\"],\n last_name=line[\"Surname\"]\n )\n applicant.user = new_user\n applicant.save()\n\n except IntegrityError as exception:\n try:\n applicant = Claimant.objects.get(\n email=applicants_dict[\"email\"]\n )\n for key, value in applicants_dict.items():\n applicant[key] = value\n\n applicant.save()\n success_list.append(index)\n\n if received_offer:\n new_user = get_user_model().objects.create_user(\n username=applicant.slug,\n email=applicant.email,\n password=user_manager.make_random_password(),\n first_name=line[\"First 
name\"],\n last_name=line[\"Surname\"]\n )\n applicant.user = new_user\n applicant.save()\n\n except BaseException as exception:\n print(\"Error: {}\\n{}\\n{}\".format(exception, line, 80 * \"-\"))\n fail_list.append(index)\n\n except BaseException as exception:\n print(\"Error: {}\\n{}\\n{}\".format(exception, line, 80 * \"-\"))\n fail_list.append(index)\n\n print(80 * \"-\")\n print(\"Success: {}\".format(success_list))\n print(\"Fail: {}\".format(fail_list))\n"
] | [
[
"pandas.notnull",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
meracan/s3-netcdf-api | [
"920d09ef7b1a205230ea2c76eabcb4853616992c"
] | [
"test/other/dataTest.old.py"
] | [
"import numpy as np\nfrom datetime import datetime\n\nnpe=3\nnelem=20\nnnode=10\nnstation=27\nnsnode=3\nntime=8760\nnfreq=3\nndir=5\n\nelem=np.arange(nelem*npe,dtype=\"i4\").reshape((nelem,npe))\ntime=np.datetime64(datetime(2000,1,1))+np.arange((ntime))*np.timedelta64(1, 'h')\nlat=np.arange((nnode),dtype=\"f8\")\nlon=np.arange((nnode),dtype=\"f8\")\nnodes=np.column_stack((lon,lat))\nbed=np.arange((nnode),dtype=\"f4\")\nslat=np.arange((nstation),dtype=\"f8\")\nslon=np.arange((nstation),dtype=\"f8\")\nfreq=np.arange((nfreq),dtype=\"f8\")\ndir=np.arange((ndir),dtype=\"f8\")\n\nnshape=ntime*nnode\nshape=(ntime,nnode)\nvariables={\n \"WIND\":{\n \"Windv_x\":np.arange(nshape,dtype=\"f4\").reshape(shape),\n \"Windv_y\":np.arange(nshape,dtype=\"f4\").reshape(shape),\n },\n \"HS\":{\"Hsig\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"DIR\":{ \"Dir\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \n \"TPS\":{\"TPsmoo\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TMM10\":{\"Tm_10\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TM01\":{\"Tm01\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TM02\":{\"Tm02\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \n \"PDIR\":{\"PkDir\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"DSPR\":{\"Dspr\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"QP\":{\"Qp\":np.arange(nshape,dtype=\"f4\").reshape(shape),},\n \"TRANSP\":{\"Transp_x\":np.arange(nshape,dtype=\"f4\").reshape(shape),\"Transp_y\":np.arange(nshape,dtype=\"f4\").reshape(shape),}\n \n }\n\n\nnshape=nstation*nsnode*ntime*nfreq*ndir\nshape=(nstation,nsnode,ntime,nfreq,ndir)\n\nspcgroup={\n \"spectra\":(np.arange(nshape,dtype=\"f8\")).reshape(shape)\n}\n\nstations={\n \"beverly\": 1,\n \"brooks\": 1,\n \"c_dixon\": 1,\n \"c_eliz\": 1,\n \"campbell\": 1,\n \"e_dell\": 1,\n \"hotspots\": 2,\n \"line_n\": 2,\n \"line_w\": 3,\n \"line_s\": 2,\n \"m_nomad\": 1,\n \"n_hecat\": 1,\n \"ne_isle\": 1,\n \"neah\": 2,\n \"p_renf\": 1,\n 
\"perouse\": 1,\n \"s_hecat\": 1,\n \"s_morsb\": 1,\n \"s_nomad\": 1,\n \"sombrio\": 1,\n \"sooke\": 1,\n \"tarbotn\": 1,\n \"tillamk\": 1,\n \"tofino\": 1,\n \"w_dixon\": 1,\n \"w_morsb\": 1,\n \"w_otter\": 1,\n \"w_washn\": 1\n}\n# Create lat lng for each station\nfor i,id in enumerate(stations):\n c=np.array([[1.0,1.0]])\n stations[id]={\"id\":i,\"nsnodes\":stations[id],\"latlng\":((np.arange(stations[id])+1)*i)[:,np.newaxis]*c}"
] | [
[
"numpy.timedelta64",
"numpy.arange",
"numpy.array",
"numpy.column_stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cafe-com-analytics/stock_market_index_daily_direction | [
"e05eced04d3f0ae3134315de0163bfdf140c1e4a"
] | [
"src/features/build_features.py"
] | [
"import numpy as np\nimport pandas as pd\nimport yfinance as yf\n\n\ndef downloading_stocks_data(dct, start_date: str = \"2021-01-01\", end_date: str = \"2021-07-01\") -> pd.DataFrame:\n \"\"\"\n Download the stocks daily information from tickers listed as keys of a dictionary, gets only \"Close\" price from\n each day within start_date and end_date.\n\n Args:\n dct (dict): format {'ticker': {'name': name, etc}}\n start_date (str, optional): [description]. Defaults to \"2011-01-01\".\n end_date (str, optional): [description]. Defaults to \"2022-01-01\".\n\n Returns:\n pd.DataFrame: dataframe of close prices of each ticker.\n \"\"\"\n df = yf.download(list(dct.keys())[0], start=start_date, end=end_date, show_errors=False)[[\"Close\"]]\n df.columns = [dct[list(dct.keys())[0]][\"name\"]]\n\n for market_index in list(dct.keys())[1:]:\n df_temp = yf.download(market_index, start=start_date, end=end_date)[[\"Close\"]]\n df_temp.columns = [dct[market_index][\"name\"]]\n df = df.merge(df_temp, how='left', left_index=True, right_index=True)\n\n df.dropna(how='all', axis=0, inplace=True)\n df.fillna(method='ffill', inplace=True)\n df.fillna(method='bfill', inplace=True)\n\n return df\n\n\ndef daily_return(df, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"\n Return the daily return of the lst_columns.\n \"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n df[column] = (np.log(df[column]) - np.log(df[column].shift(periods=1)))*100\n\n df.dropna(axis=0, how='all', inplace=True)\n\n return df\n\n\ndef return_in_period(df, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"\n Return the return of the lst_columns.\n \"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n df[column] = 
df[column]/df[column][0]\n\n return df\n\n\ndef create_shifted_rt(df, rts: list, column_name: str = 'Close') -> pd.DataFrame:\n \"\"\"\n Return a dataframe with new lagged columns according to a rts' list.\n\n Args:\n df (pd.DataFrame): [description]\n rts (list): list with int values. Each value represents a lag in period.\n column_name (str, optional): [description]. Defaults to 'Close'.\n\n Returns:\n pd.DataFrame: [description]\n \"\"\"\n for t in rts:\n df[f\"rt-{t}\"] = df[column_name].shift(periods=t)\n return df\n\n\ndef uniform_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"This function creates the target \"Cluster\" according to the limits described in (2011, Zuo and Kita).\"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n conditions = [\n df[column] < -1.12,\n (df[column] >= -1.12) & (df[column] < -0.42),\n (df[column] >= -0.42) & (df[column] < 0),\n (df[column] >= 0) & (df[column] < 0.44),\n (df[column] >= 0.44) & (df[column] < 1.07),\n df[column] >= 1.07]\n\n choices = [1, 2, 3, 4, 5, 6]\n df[\"cluster_\"+column] = np.select(conditions, choices, default=np.nan)\n\n return df\n\n\ndef binary_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:\n \"\"\"\n This function creates the target \"Cluster\" according to the limits described in article.\n\n Args:\n df (pd.DataFrame): [description]\n lst_columns (list): [description]\n\n Returns:\n pd.DataFrame: return 'cluster_'+column with values 1 for positive return and 0 for equal or below zero.\n \"\"\"\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n for column in lst_columns:\n df[\"cluster_\"+column] = np.where(df[column] > 0, 1, 0)\n\n return df\n\n\ndef boxplot_clustering(df: pd.DataFrame, lst_columns: list = 
'all') -> pd.DataFrame:\n if lst_columns == 'all':\n lst_columns = df.columns.tolist()\n elif isinstance(lst_columns, list):\n pass\n else:\n lst_columns = list(lst_columns)\n\n df_boxplot = df.describe().T\n quartile_1 = df_boxplot[\"25%\"][0]\n quartile_2 = df_boxplot[\"50%\"][0]\n quartile_3 = df_boxplot[\"75%\"][0]\n\n for column in lst_columns:\n conditions = [\n (df[column] < quartile_1),\n (df[column] >= quartile_1) & (df[column] < quartile_2),\n (df[column] >= quartile_2) & (df[column] < quartile_3),\n (df[column] >= quartile_3)]\n\n choices = [int(1), int(2), int(3), int(4)]\n df[\"cluster_\"+column] = np.select(conditions, choices, default=np.nan)\n\n return df\n"
] | [
[
"numpy.log",
"numpy.where",
"numpy.select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
clear-nus/BOIRL | [
"cc872111fda3c7b8118e1a864831013c30f63948",
"cc872111fda3c7b8118e1a864831013c30f63948",
"cc872111fda3c7b8118e1a864831013c30f63948"
] | [
"bayesian_irl/src/utils/prob_dists.py",
"inverse_rl/algos/irl_batch_polopt.py",
"gym-sweden/gym_sweden/envs/swedenworld_env.py"
] | [
"import scipy.stats\nimport numpy as np\nfrom scipy.stats import multivariate_normal as MVG\n\nclass UniformDist:\n def __init__(self, xmax=1., xmin=None):\n self.xmax = xmax\n self.xmin = - xmax if xmin is None else xmin\n self.prob = 1 / (self.xmax - self.xmin)\n\n def __call__(self, *args, **kwargs):\n return self.prob\n\n def __str__(self):\n return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)\n\nclass MultiuniformDist:\n def __init__(self, xmax=[2.,10.], xmin=[-2.,-10.]):\n self.xmax = xmax\n self.xmin = - xmax if xmin is None else xmin\n self.prob = (1 / (self.xmax[0] - self.xmin[0]))*(1 / (self.xmax[1] - self.xmin[1]))\n\n def __call__(self, *args, **kwargs):\n return self.prob\n\n def __str__(self):\n return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)\n\nclass MultiuniformborlangeDist:\n def __init__(self, xmax=[0., 0.], xmin=[-2.5,-2.5]):\n self.xmax = xmax\n self.xmin = - xmax if xmin is None else xmin\n self.prob = (1 / (self.xmax[0] - self.xmin[0]))*(1 / (self.xmax[1] - self.xmin[1]))\n\n def __call__(self, *args, **kwargs):\n return self.prob\n\n def __str__(self):\n return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)\n\n\nclass DistBase:\n def __init__(self, dist, params):\n self.dist = dist\n self.params = params\n\n def __call__(self, x):\n \"\"\"\n :x: input\n :return: P(x)\n \"\"\"\n return np.exp(np.sum(self.dist.logpdf(x, **self.params)))\n\n def sample(self, size=10):\n return self.dist.rvs(size=size, **self.params)\n\n def __str__(self):\n return self.__class__.__name__ + '(' + ', '.join(['{}={}'.format(key, value)\n for key, value in self.params.items()]) + ')'\n\n\nclass GaussianDist(DistBase):\n def __init__(self, loc=0, scale=0.1):\n \"\"\"\n :param loc: location of gaussian distribution\n :param scale: var == scale ** 2\n \"\"\"\n params = dict(loc=loc, scale=scale)\n dist = scipy.stats.norm\n super().__init__(dist=dist, params=params)\n\n\nclass MultigaussDist(DistBase):\n def __init__(self, 
mean=np.array([1.25, 5.0]), cov=np.array([[1, 0], [0, 1]])):\n \"\"\"\n :param loc: location of gaussian distribution\n :param scale: var == scale ** 2\n \"\"\"\n #params = dict(mean=mean, cov=cov)\n self.rvs = MVG(mean=mean,cov=cov)\n #super().__init__(dist=dist, params=params)\n\n def __call__(self, x):\n return np.exp(np.sum(self.rvs.logpdf(x)))\n\nclass MultigaussBorlangeDist(DistBase):\n def __init__(self, dist, mean=np.array([-2, -1.0, -1]), cov=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])):\n \"\"\"\n :param loc: location of gaussian distribution\n :param scale: var == scale ** 2\n \"\"\"\n #params = dict(mean=mean, cov=cov)\n self.rvs = MVG(mean=mean,cov=cov)\n #super().__init__(dist=dist, params=params)\n\n def __call__(self, x):\n return np.exp(np.sum(self.rvs.logpdf(x)))\n\n\n\n\nclass BetaDist(DistBase):\n def __init__(self, a=0.5, b=0.5, loc=0, scale=1):\n params = dict(a=a, b=b, loc=loc, scale=scale)\n dist = scipy.stats.beta\n super().__init__(dist=dist, params=params)\n\n\nclass GammaDist(DistBase):\n def __init__(self, a=2, loc=0, scale=1):\n params = dict(a=a, loc=loc, scale=scale)\n dist = scipy.stats.gamma\n super().__init__(dist=dist, params=params)\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import os\n\n dists = (GaussianDist, BetaDist, GammaDist)\n for dist in dists:\n distribution = dist()\n samples = distribution.sample(size=100)\n plt.hist(samples)\n plt.title(distribution)\n path = '/' + os.path.join(*os.path.abspath(__file__).split('/')[:-3], 'results',\n '{}.png'.format(dist.__name__))\n plt.savefig(path)\n plt.cla()\n",
"import time\n\nfrom rllab.algos.base import RLAlgorithm\nimport rllab.misc.logger as logger\nimport rllab.plotter as plotter\nfrom sandbox.rocky.tf.policies.base import Policy\nimport tensorflow as tf\nfrom sandbox.rocky.tf.samplers.batch_sampler import BatchSampler\nfrom sandbox.rocky.tf.samplers.vectorized_sampler import VectorizedSampler\nimport numpy as np\nfrom collections import deque\n\nfrom inverse_rl.utils.hyperparametrized import Hyperparametrized\n\n\nclass IRLBatchPolopt(RLAlgorithm, metaclass=Hyperparametrized):\n \"\"\"\n Base class for batch sampling-based policy optimization methods.\n This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.\n \"\"\"\n\n def __init__(\n self,\n env,\n policy,\n baseline,\n scope=None,\n n_itr=500,\n start_itr=0,\n batch_size=5000,\n max_path_length=500,\n discount=0.99,\n gae_lambda=1,\n plot=False,\n pause_for_plot=False,\n center_adv=True,\n positive_adv=False,\n store_paths=True,\n whole_paths=True,\n fixed_horizon=False,\n sampler_cls=None,\n sampler_args=None,\n force_batch_sampler=False,\n init_pol_params = None,\n irl_model=None,\n irl_model_wt=1.0,\n discrim_train_itrs=10,\n zero_environment_reward=False,\n init_irl_params=None,\n train_irl=True,\n key='',\n **kwargs\n ):\n \"\"\"\n :param env: Environment\n :param policy: Policy\n :type policy: Policy\n :param baseline: Baseline\n :param scope: Scope for identifying the algorithm. 
Must be specified if running multiple algorithms\n simultaneously, each using different environments and policies\n :param n_itr: Number of iterations.\n :param start_itr: Starting iteration.\n :param batch_size: Number of samples per iteration.\n :param max_path_length: Maximum length of a single rollout.\n :param discount: Discount.\n :param gae_lambda: Lambda used for generalized advantage estimation.\n :param plot: Plot evaluation run after each iteration.\n :param pause_for_plot: Whether to pause before contiuing when plotting.\n :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.\n :param positive_adv: Whether to shift the advantages so that they are always positive. When used in\n conjunction with center_adv the advantages will be standardized before shifting.\n :param store_paths: Whether to save all paths data to the snapshot.\n :return:\n \"\"\"\n self.env = env\n self.policy = policy\n self.baseline = baseline\n self.scope = scope\n self.n_itr = n_itr\n self.start_itr = start_itr\n self.batch_size = batch_size\n self.max_path_length = max_path_length\n self.discount = discount\n self.gae_lambda = gae_lambda\n self.plot = plot\n self.pause_for_plot = pause_for_plot\n self.center_adv = center_adv\n self.positive_adv = positive_adv\n self.store_paths = store_paths\n self.whole_paths = whole_paths\n self.fixed_horizon = fixed_horizon\n self.init_pol_params = init_pol_params\n self.init_irl_params = init_irl_params\n self.irl_model = irl_model\n self.irl_model_wt = irl_model_wt\n self.no_reward = zero_environment_reward\n self.discrim_train_itrs = discrim_train_itrs\n self.train_irl = train_irl\n self.__irl_params = None\n self.myweights = []\n\n if self.irl_model_wt > 0:\n assert self.irl_model is not None, \"Need to specify a IRL model\"\n\n if sampler_cls is None:\n if self.policy.vectorized and not force_batch_sampler:\n print('using vec sampler')\n sampler_cls = VectorizedSampler\n else:\n print('using 
batch sampler')\n sampler_cls = BatchSampler\n if sampler_args is None:\n sampler_args = dict()\n self.sampler = sampler_cls(self, **sampler_args)\n self.init_opt()\n\n def start_worker(self):\n self.sampler.start_worker()\n if self.plot:\n #plotter.init_worker()\n plotter.init_plot(self.env, self.policy)\n\n def shutdown_worker(self):\n self.sampler.shutdown_worker()\n\n def obtain_samples(self, itr):\n return self.sampler.obtain_samples(itr)\n\n def process_samples(self, itr, paths):\n #processed = self.sampler.process_samples(itr, paths)\n return self.sampler.process_samples(itr, paths)\n\n def log_avg_returns(self, paths):\n undiscounted_returns = [sum(path[\"rewards\"]) for path in paths]\n avg_return = np.mean(undiscounted_returns)\n return avg_return\n\n def get_irl_params(self):\n return self.__irl_params\n\n def compute_irl(self, paths, itr=0):\n if self.no_reward:\n tot_rew = 0\n for path in paths:\n tot_rew += np.sum(path['rewards'])\n path['rewards'] *= 0\n logger.record_tabular('OriginalTaskAverageReturn', tot_rew/float(len(paths)))\n\n if self.irl_model_wt <=0:\n return paths\n\n if self.train_irl:\n max_itrs = self.discrim_train_itrs\n lr=1e-3\n mean_loss = self.irl_model.fit(paths, policy=self.policy, itr=itr, max_itrs=max_itrs, lr=lr,\n logger=logger)\n\n logger.record_tabular('IRLLoss', mean_loss)\n self.__irl_params = self.irl_model.get_params()\n\n probs = self.irl_model.eval(paths, gamma=self.discount, itr=itr)\n\n logger.record_tabular('IRLRewardMean', np.mean(probs))\n logger.record_tabular('IRLRewardMax', np.max(probs))\n logger.record_tabular('IRLRewardMin', np.min(probs))\n\n\n if self.irl_model.score_trajectories:\n # TODO: should I add to reward here or after advantage computation?\n for i, path in enumerate(paths):\n path['rewards'][-1] += self.irl_model_wt * probs[i]\n else:\n for i, path in enumerate(paths):\n path['rewards'] += self.irl_model_wt * probs[i]\n return paths\n\n def train(self,weightname):\n sess = 
tf.get_default_session()\n sess.run(tf.global_variables_initializer())\n if self.init_pol_params is not None:\n self.policy.set_param_values(self.init_pol_params)\n if self.init_irl_params is not None:\n self.irl_model.set_params(self.init_irl_params)\n self.start_worker()\n start_time = time.time()\n\n returns = []\n self.myweights.append(self.irl_model.get_weights())\n np.save(weightname, np.array(self.myweights))\n for itr in range(self.start_itr, self.n_itr):\n itr_start_time = time.time()\n with logger.prefix('itr #%d | ' % itr):\n logger.log(\"Obtaining samples...\")\n paths = self.obtain_samples(itr)\n\n logger.log(\"Processing samples...\")\n paths = self.compute_irl(paths, itr=itr)\n returns.append(self.log_avg_returns(paths))\n samples_data = self.process_samples(itr, paths)\n\n logger.log(\"Logging diagnostics...\")\n self.log_diagnostics(paths)\n logger.log(\"Optimizing policy...\")\n self.optimize_policy(itr, samples_data)\n logger.log(\"Saving snapshot...\")\n params = self.get_itr_snapshot(itr, samples_data) # , **kwargs)\n if self.store_paths:\n params[\"paths\"] = samples_data[\"paths\"]\n logger.save_itr_params(itr, params)\n logger.log(\"Saved\")\n logger.record_tabular('Time', time.time() - start_time)\n logger.record_tabular('ItrTime', time.time() - itr_start_time)\n logger.dump_tabular(with_prefix=False)\n if self.plot:\n self.update_plot()\n if self.pause_for_plot:\n input(\"Plotting evaluation run: Press Enter to \"\n \"continue...\")\n self.myweights.append(self.irl_model.get_weights())\n if itr%20 == 0:\n np.save(weightname, np.array(self.myweights))\n np.save(weightname, np.array(self.myweights))\n self.shutdown_worker()\n return \n\n def log_diagnostics(self, paths):\n self.env.log_diagnostics(paths)\n self.policy.log_diagnostics(paths)\n self.baseline.log_diagnostics(paths)\n\n def init_opt(self):\n \"\"\"\n Initialize the optimization procedure. 
If using tensorflow, this may\n include declaring all the variables and compiling functions\n \"\"\"\n raise NotImplementedError\n\n def get_itr_snapshot(self, itr, samples_data):\n \"\"\"\n Returns all the data that should be saved in the snapshot for this\n iteration.\n \"\"\"\n raise NotImplementedError\n\n def optimize_policy(self, itr, samples_data):\n raise NotImplementedError\n\n def update_plot(self):\n if self.plot:\n plotter.update_plot(self.policy, self.max_path_length)\n",
"import gym\nfrom gym import spaces\nfrom gym import utils\nimport numpy as np\nimport logging\nimport numpy.random as rn\nimport math\nfrom itertools import product\nfrom scipy.sparse import load_npz\nfrom scipy.sparse import hstack\nfrom scipy.sparse import vstack\nfrom scipy.sparse.coo import coo_matrix\nimport os\n\nlogger = logging.getLogger(__name__)\n\n\nclass SwedenWorldEnv(gym.Env, utils.EzPickle):\n metadata = {'render.modes': ['human']}\n\n def __init__(self):\n self.basedir = \"/home/user/basedir/\" #fill in your path here.\n self.discount = 0.99\n self.sweden_dir = os.path.join(self.basedir,\"mdp/metaborlange/newattempt2\")\n self.destination = 7621 # Destination is in MATLAB indices. So remember to substract indices at right place\n self.horizon = 100\n \n self.incidence_smat_no_dummy = load_npz(os.path.join(self.sweden_dir, \"incidence_no_dummy.npz\"))\n self.incidence_smat_dummy = load_npz(os.path.join(self.sweden_dir, \"incidence_dummy.npz\"))\n self.incidence_smat = load_npz(os.path.join(self.sweden_dir, \"incidence.npz\"))\n self.travel_time_smat = load_npz(os.path.join(self.sweden_dir, \"travel_time.npz\"))\n self.turnangle_smat = load_npz(os.path.join(self.sweden_dir, \"turn_angle.npz\"))\n self.uturn_smat = load_npz(os.path.join(self.sweden_dir, \"u_turn.npz\"))\n self.lefturn_smat = load_npz(os.path.join(self.sweden_dir, \"left_turn.npz\"))\n self.observation_smat = load_npz(os.path.join(self.sweden_dir, \"observation.npz\"))\n\n self.gt_theta = np.array([-2., -1., -1., -20.])\n\n self.N_ACTIONS = 6 # 0-4 correspond to turn angles from -3.14 to 3.14. 
Action 5 corresponds to reaching destination\n self.N_ROADLINKS = self.travel_time_smat.shape[0]\n # self.goal_reward = 10.\n\n self.features = np.load(os.path.join(self.sweden_dir, \"new_feat_data.npy\"))\n self.transition_probabilities = self.getTP()\n self.state_debug = np.load(os.path.join(self.sweden_dir, \"new_state_debug.npy\"), allow_pickle=True).item()\n self.rewards = np.load(os.path.join(self.sweden_dir, \"virtual_rewards.npy\"))\n self.nodummy_states = np.array(np.load(os.path.join(self.sweden_dir, \"nodummy_states.npy\")))\n self.gt_theta = np.array([-2., -1., -1., -20.])\n self.N_STATES, self.N_FEATURES = np.shape(self.features)\n self.viewer = None\n self.server_process = None\n self.server_port = None\n\n self.state = None\n self.obs = None\n\n self.observation_space = spaces.Box(low=min(0., np.min(self.features)),\n high=max(1., np.max(self.features)),\n shape=(self.N_FEATURES,))\n # Action space omits the Tackle/Catch actions, which are useful on defense\n # self.action_space = spaces.Discrete(len(self.actions))\n self.action_space = spaces.Box(low=0, high=self.N_ACTIONS,\n shape=(1,))\n self.reset()\n print(\"init over\")\n\n def step(self, action):\n #action = (np.round(action)).astype(np.int)[0]\n #action2 = min(max(action, 0), self.N_ACTIONS)\n action2 = np.floor(action).astype(np.int)[0]\n action3 = min(max(action2, 0), self.N_ACTIONS-1)\n if not(action3<self.N_ACTIONS):\n print(\"This should not happen\")\n print(action,action2,action3)\n obs, obsind, reward, done = self._take_action(action3)\n return obs, reward, done, {}\n\n def _take_action(self, action):\n currentTp = self.transition_probabilities[action]\n rowind = np.where(currentTp.row == self.state)[0]\n assert (len(rowind) > 0)\n next_state = currentTp.col[rowind].item()\n self.state = next_state\n self.obs = self.features[next_state]\n reward = self.rewards[next_state]\n if next_state == 20198:\n done=True\n else:\n done=False\n return self.obs, next_state, reward, done\n\n def 
reset(self):\n newState = np.random.choice(self.nodummy_states, 1).item()\n newObs = self.features[newState]\n self.state = newState\n self.obs = newObs\n return self.obs\n\n def _render(self, mode='human', close=False):\n \"\"\" Viewer only supports human mode currently. \"\"\"\n raise NotImplementedError\n\n def getTP(self):\n transitions = np.load(os.path.join(self.sweden_dir, \"new_transitions.npy\"), allow_pickle=True).item()\n nstates, nfeatures = np.shape(self.features)\n transition_dynamics = {}\n for i in range(self.N_ACTIONS):\n tpsparse = coo_matrix((transitions[i][2, :], (transitions[i][0, :], transitions[i][1, :])),\n shape=(nstates, nstates))\n tpdense = tpsparse.toarray()\n assert (np.max(np.sum(tpdense, axis=1)) == 1. and np.min(np.sum(tpdense, axis=1)) == 1.)\n transition_dynamics[i] = coo_matrix((transitions[i][2, :], (transitions[i][0, :], transitions[i][1, :])),\n shape=(nstates, nstates))\n return transition_dynamics\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.savefig",
"scipy.stats.multivariate_normal",
"numpy.array",
"matplotlib.pyplot.hist"
],
[
"tensorflow.get_default_session",
"numpy.min",
"numpy.max",
"tensorflow.global_variables_initializer",
"numpy.mean",
"numpy.array",
"numpy.sum"
],
[
"scipy.sparse.coo.coo_matrix",
"numpy.random.choice",
"numpy.min",
"numpy.max",
"numpy.shape",
"numpy.floor",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
edesz/electricity-consumption-forecast | [
"9bc49523d9c2ed6d827ce690916980cf7e818fed"
] | [
"src/data_prep_helpers.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport pandas as pd\n\n\ndef add_corona_dates(df, index_name, strategy=[\"during_corona\", \"no_corona\"]):\n \"\"\"\n Inputs\n ------\n strategy : List\n division of datetimes based on stages of corona; acceptable strategies\n are one of the following (order in list does not matter)\n - ['during_corona', 'no_corona']\n - ['pre_corona', 'during_corona', 'post_corona']\n\n SOURCE\n ------\n https://github.com/facebook/prophet/issues/1416#issuecomment-618553502\n \"\"\"\n d_corona = {\n \"BE\": [\n pd.to_datetime(\"2020-03-07 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"CH\": [\n pd.to_datetime(\"2020-03-07 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"CZ\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"DE\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"ES\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"FR\": [\n pd.to_datetime(\"2020-03-07 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"HR\": [\n pd.to_datetime(\"2020-03-21 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"IT\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"NL\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n \"PL\": [\n pd.to_datetime(\"2020-03-14 00:00:00\"),\n pd.to_datetime(\"2020-04-12 23:00:00\"),\n ],\n }\n df_corona = (\n pd.DataFrame.from_dict(d_corona, orient=\"index\")\n .reset_index()\n .rename(\n columns={0: \"corona_start\", 1: \"corona_end\", \"index\": \"country\"}\n )\n )\n df = df.merge(df_corona, on=\"country\", how=\"left\")\n\n # Add corona periods based on specified strategy\n strategies_dict = {\n \"dn\": [\"during_corona\", \"no_corona\"],\n \"pdp\": [\"pre_corona\", \"during_corona\", 
\"post_corona\"],\n }\n if set(strategy) == set(strategies_dict[\"dn\"]):\n df[\"no_corona\"] = (df[index_name] < df[\"corona_start\"]) | (\n df[index_name] > df[\"corona_end\"]\n )\n elif set(strategy) == set(strategies_dict[\"pdp\"]):\n df[\"pre_corona\"] = df[index_name] < df[\"corona_start\"]\n df[\"post_corona\"] = df[index_name] > df[\"corona_end\"]\n else:\n strategies = \"\"\n for _, v in strategies_dict.items():\n strategies += \"['\" + \"', '\".join(map(str, v)) + \"'], \"\n strategies = strategies.rstrip(\", \")\n raise Exception(\n f\"Unsupported corona strategy. Expected one of: {strategies}\"\n )\n df[\"during_corona\"] = (df[index_name] >= df[\"corona_start\"]) & (\n df[index_name] <= df[\"corona_end\"]\n )\n return df\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
barcawy/OpenNE | [
"88018ed9bf34d09020464a430e09afb704b1f322"
] | [
"src/openne/Z_0709.py"
] | [
"from __future__ import print_function\nimport time\nimport math\nimport random\nimport numpy as np\nimport pickle as pkl\nimport networkx as nx\nfrom gensim.models import Word2Vec\nfrom fastdtw import fastdtw\nfrom collections import Counter\nfrom collections import defaultdict\nimport os\n\nclass Z(object):\n\n def __init__(self, graph, path_length, num_paths, dim, prefix, hop, **kwargs):\n\n kwargs[\"workers\"] = kwargs.get(\"workers\", 4)\n\n #kwargs[\"hs\"] = 1 # 1 分层softmax 0 负采样\n\n self.graph = graph\n preprocess = False\n if preprocess:\n self.ppr_matrix = self.constructSubGraph(hop)\n self.degrees, self.degree_permuted = self.create_degree()\n self.degree_neighbors, self.norm_weight = self.create_ppr_sample_table()\n self.dump_to_disk(self.degree_neighbors,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')\n self.dump_to_disk(self.norm_weight,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')\n else:\n self.degree_neighbors = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')\n self.norm_weight = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')\n sentences = self.simulate_walks(\n num_walks=num_paths, walk_length=path_length)\n kwargs[\"sentences\"] = sentences\n kwargs[\"min_count\"] = kwargs.get(\"min_count\", 0)\n kwargs[\"size\"] = kwargs.get(\"size\", dim)\n kwargs[\"sg\"] = 1 # 1 skipgram; 0 CBOW\n\n self.size = kwargs[\"size\"]\n print(\"Learning representation...\")\n word2vec = Word2Vec(**kwargs)\n self.vectors = {}\n for word in graph.G.nodes():\n self.vectors[word] = word2vec.wv[word]\n del word2vec\n\n def dump_to_disk(self, f, file_name):\n with open(file_name + '.pkl', 'wb') as handle:\n pkl.dump(f, handle, protocol=pkl.HIGHEST_PROTOCOL)\n\n def load_pkl(self, file_name):\n with open(file_name + '.pkl', 'rb') as handle:\n val = pkl.load(handle)\n return val\n\n def neighbors(self, fringe):\n # find all 1-hop neighbors of nodes in 
fringe from A\n graph = self.graph.G\n res = set()\n for node in fringe:\n nei = graph.neighbors(node)\n nei = set(nei)\n res = res.union(nei)\n return res\n\n def constructSubGraph(self, hop):\n graph = self.graph.G\n edge_set = set(graph.edges())\n nodes = list(graph.nodes())\n #subgraph_map = defaultdict(nx.Graph)\n ppr_matrix = {}\n for node in nodes:\n subgraph_map = nx.Graph()\n subgraph_map.add_node(node)\n fringe = set(node)\n visited = set(node)\n for dist in range(0, hop):\n fringe = self.neighbors(fringe)\n fringe = fringe - visited\n visited = visited.union(fringe)\n visited = list(visited)\n for pos_u, u in enumerate(visited):\n for v in visited[pos_u+1:]:\n if (u, v) in edge_set or (v, u) in edge_set:\n subgraph_map.add_edge(u, v)\n\n ppr_matrix[node] = Counter()\n walk = self.subgraph_walk(subgraph_map, walk_length=500, start_node=node)\n ppr_matrix[node].update(walk)\n return ppr_matrix\n\n def subgraph_walk(self, subGraph, walk_length, start_node):\n '''\n Simulate a random walk starting from start node.\n '''\n G = subGraph\n walk = [start_node]\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = list(G.neighbors(cur))\n if len(cur_nbrs) > 0:\n walk.append(random.choice(cur_nbrs))\n else:\n # 独立的点\n break\n return walk\n\n def deepwalk_walk(self, walk_length, start_node, alpha = 0.5):\n '''\n Simulate a random walk starting from start node.\n '''\n G = self.graph.G\n walk = [start_node]\n while len(walk) < walk_length:\n cur = walk[-1]\n alpha = 1#alpha/G.degree(cur)\n if np.random.rand() < alpha:\n walk.append(np.random.choice(self.degree_neighbors[cur], p=self.norm_weight[cur]))\n else:\n cur_nbrs = list(G.neighbors(cur))\n if len(cur_nbrs) > 0:\n # node2vec\n n2v = 0\n if n2v:\n nbr = random.choice(cur_nbrs)\n if set(cur_nbrs) & set(G.neighbors(nbr)):\n walk.append(random.choice(cur_nbrs))\n else:\n walk.append(nbr)\n else:\n # deepwalk\n walk.append(random.choice(cur_nbrs))\n else:\n break\n return walk\n\n def simulate_walks(self, 
num_walks, walk_length):\n '''\n Repeatedly simulate random walks from each node.\n '''\n G = self.graph.G\n walks = []\n nodes = list(G.nodes())\n print('Simulate walk iteration:')\n for walk_iter in range(num_walks):\n # pool = multiprocessing.Pool(processes = 4)\n print(str(walk_iter + 1), '/', str(num_walks))\n random.shuffle(nodes)\n for node in nodes:\n # walks.append(pool.apply_async(deepwalk_walk_wrapper, (self, walk_length, node, )))\n walks.append(self.deepwalk_walk(\n walk_length=walk_length, start_node=node))\n # pool.close()\n # pool.join()\n # print(len(walks))\n return walks\n\n def create_degree(self):\n G = self.graph.G\n print(\"- Creating degree vectors...\")\n degrees = {}\n degrees_sorted = set()\n degree_permuted = {}\n for v in G.nodes():\n degree = G.degree(v)\n degrees_sorted.add(degree)\n degree_permuted[v] = degree\n if (degree not in degrees):\n degrees[degree] = {}\n degrees[degree]['vertices'] = []\n degrees[degree]['vertices'].append(v)\n degrees_sorted = np.array(list(degrees_sorted), dtype='int')\n # degree_permuted = degrees_sorted\n degrees_sorted = np.sort(degrees_sorted)\n l = len(degrees_sorted)\n for index, degree in enumerate(degrees_sorted):\n if (index > 0):\n degrees[degree]['before'] = degrees_sorted[index - 1]\n if (index < (l - 1)):\n degrees[degree]['after'] = degrees_sorted[index + 1]\n print(\"- Degree vectors created.\")\n return degrees, degree_permuted\n\n def create_ppr_sample_table(self):\n print(\"- Creating PPR sample table ...\")\n nodes = list(self.graph.G.nodes())\n degree_neighbors = {}\n norm_weight = {}\n nodes_num = len(nodes)\n k = 0\n for node in nodes:\n print(str(k + 1), '/', str(nodes_num))\n k += 1\n degree_neighbors[node] = self.get_vertices(node)\n norm_weight[node] = self.ppr_sample(node, degree_neighbors[node])\n print(\"- PPR sample table created.\")\n return degree_neighbors, norm_weight\n\n def cost(self, a, b):\n ep = 0.001\n m = max(a, b) + ep\n mi = min(a, b) + ep\n return ((m / mi) - 
1)\n\n def ppr_sample(self, node, neighbors):\n node_ppr_v = [i[1] for i in self.ppr_matrix[node].most_common()]#[1:]\n if len(node_ppr_v) == 0:\n node_ppr_v = [1]\n sim_list = []\n nodes_num = len(self.graph.G.nodes())\n for _neighbor in neighbors:\n neighbor_ppr_v = [i[1] for i in self.ppr_matrix[_neighbor].most_common()]#[1:]\n if len(neighbor_ppr_v) == 0:\n neighbor_ppr_v = [1]\n dits_dtw, _ = fastdtw(node_ppr_v, neighbor_ppr_v, radius=1, dist=self.cost)\n sim_list.append(np.exp(-1.0 * dits_dtw))\n\n norm_weight = [float(i) / sum(sim_list) for i in sim_list]\n # sampled_neighbor = np.random.choice(neighbors, p=norm_weight)\n return norm_weight\n\n def verifyDegrees(self, degree_v_root, degree_a, degree_b):\n\n if (degree_b == -1):\n degree_now = degree_a\n elif (degree_a == -1):\n degree_now = degree_b\n elif (abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root)):\n degree_now = degree_b\n else:\n degree_now = degree_a\n\n return degree_now\n\n def get_vertices(self, v):\n num_seleted = 2 * math.log(len(self.graph.G.nodes()), 2)\n vertices = []\n\n degree_v = self.graph.G.degree(v)\n\n try:\n c_v = 0\n\n for v2 in self.degrees[degree_v]['vertices']:\n if (v != v2):\n vertices.append(v2)\n c_v += 1\n if (c_v > num_seleted):\n raise StopIteration\n\n if ('before' not in self.degrees[degree_v]):\n degree_b = -1\n else:\n degree_b = self.degrees[degree_v]['before']\n if ('after' not in self.degrees[degree_v]):\n degree_a = -1\n else:\n degree_a = self.degrees[degree_v]['after']\n if (degree_b == -1 and degree_a == -1):\n raise StopIteration\n degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)\n\n while True:\n for v2 in self.degrees[degree_now]['vertices']:\n if (v != v2):\n vertices.append(v2)\n c_v += 1\n if (c_v > num_seleted):\n raise StopIteration\n\n if (degree_now == degree_b):\n if ('before' not in self.degrees[degree_b]):\n degree_b = -1\n else:\n degree_b = self.degrees[degree_b]['before']\n else:\n if ('after' not in 
self.degrees[degree_a]):\n degree_a = -1\n else:\n degree_a = self.degrees[degree_a]['after']\n\n if (degree_b == -1 and degree_a == -1):\n raise StopIteration\n\n degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)\n\n except StopIteration:\n return list(vertices)\n\n return list(vertices)\n\n def save_embeddings(self, filename):\n fout = open(filename, 'w')\n node_num = len(self.vectors.keys())\n fout.write(\"{} {}\\n\".format(node_num, self.size))\n for node, vec in self.vectors.items():\n fout.write(\"{} {}\\n\".format(node,\n ' '.join([str(x) for x in vec])))\n fout.close()\n\n def save_results(self, filename, method, ratio, result):\n fout = open(filename, 'w')\n node_num = len(self.vectors)\n fout.write(\"{} {} {} \\n\".format(method, ratio, result))\n fout.close()\n\n"
] | [
[
"numpy.exp",
"numpy.random.rand",
"numpy.sort",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
strawberrypie/jina-hub | [
"8b2356d58687694d817881c840745214f12e94c4",
"8b2356d58687694d817881c840745214f12e94c4",
"8b2356d58687694d817881c840745214f12e94c4",
"8b2356d58687694d817881c840745214f12e94c4",
"8b2356d58687694d817881c840745214f12e94c4",
"8b2356d58687694d817881c840745214f12e94c4",
"8b2356d58687694d817881c840745214f12e94c4"
] | [
"crafters/image/ImageNormalizer/__init__.py",
"crafters/image/ImageReader/tests/test_imagereader.py",
"crafters/image/ImageResizer/__init__.py",
"encoders/numeric/IncrementalPCAEncoder/__init__.py",
"crafters/image/ImageCropper/__init__.py",
"segmenters/image/SlidingWindowImageCropper/__init__.py",
"segmenters/audio/AudioSlicer/tests/test_audioslicer.py"
] | [
"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nfrom typing import Tuple, Dict, Union, Iterable\n\nimport numpy as np\n\nfrom jina.executors.decorators import single\nfrom jina.executors.crafters import BaseCrafter\n\nfrom .helper import _load_image, _move_channel_axis, _crop_image, _resize_short\n\n\nclass ImageNormalizer(BaseCrafter):\n \"\"\"\n Normalize the image.\n\n :class:`ImageNormalizer` works on doc-level,\n it receives values of file names on the\n doc-level and returns image matrix on the chunk-level\n\n :param target_size: Desired output size. If size is a sequence\n like (h, w), the output size will be matched to this.\n If size is an int, the smaller edge of the image will be matched\n to this number maintaining the aspect ratio.\n :param img_mean: The mean of the images in `RGB` channels.\n Set to `[0.485, 0.456, 0.406]` for the models trained\n on `imagenet` with pytorch backbone.\n :param img_std: the std of the images in `RGB` channels.\n Set to `[0.229, 0.224, 0.225]` for the models trained\n on `imagenet` with pytorch backbone.\n :param resize_dim: the size of images' height and width to be resized to.\n The images are resized before cropping to the output size\n :param channel_axis: the axis id of the color channel,\n ``-1`` indicates the color channel info at the last axis\n \"\"\"\n\n def __init__(self,\n target_size: Union[Iterable[int], int] = 224,\n img_mean: Tuple[float] = (0, 0, 0),\n img_std: Tuple[float] = (1, 1, 1),\n resize_dim: int = 256,\n channel_axis: int = -1,\n *args,\n **kwargs):\n \"\"\"Set Constructor.\"\"\"\n super().__init__(*args, **kwargs)\n if isinstance(target_size, int):\n self.target_size = target_size\n elif isinstance(target_size, Iterable):\n self.target_size = tuple(target_size)\n else:\n raise ValueError(f'target_size {target_size} should be an integer or tuple/list of 2 integers')\n self.resize_dim = resize_dim\n self.img_mean = 
np.array(img_mean).reshape((1, 1, 3))\n self.img_std = np.array(img_std).reshape((1, 1, 3))\n self.channel_axis = channel_axis\n\n @single\n def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:\n \"\"\"\n Normalize the image.\n\n :param blob: the ndarray of the image with the color channel at the last axis\n :return: a chunk dict with the normalized image\n \"\"\"\n raw_img = _load_image(blob, self.channel_axis)\n _img = self._normalize(raw_img)\n img = _move_channel_axis(_img, -1, self.channel_axis)\n return dict(offset=0, blob=img)\n\n def _normalize(self, img):\n img = _resize_short(img, target_size=self.resize_dim)\n img, _, _ = _crop_image(img, target_size=self.target_size, how='center')\n img = np.array(img).astype('float32')/255\n img -= self.img_mean\n img /= self.img_std\n return img\n",
"import io\n\nimport numpy as np\nfrom PIL import Image\n\nfrom .. import ImageReader\n\n\ndef create_test_image(output_fn, size_width=50, size_height=50):\n from PIL import Image\n image = Image.new('RGB', size=(size_width, size_height), color=(155, 0, 0))\n with open(output_fn, \"wb\") as f:\n image.save(f, 'jpeg')\n\n\ndef test_io_uri():\n crafter = ImageReader()\n tmp_fn = crafter.get_file_from_workspace('test.jpeg')\n img_size = 50\n create_test_image(tmp_fn, size_width=img_size, size_height=img_size)\n test_docs = crafter.craft([None, None], np.stack([tmp_fn, tmp_fn]))\n assert len(test_docs) == 2\n for test_doc in test_docs:\n assert test_doc['blob'].shape == (img_size, img_size, 3)\n\n\ndef test_io_buffer():\n crafter = ImageReader()\n tmp_fn = crafter.get_file_from_workspace('test.jpeg')\n img_size = 50\n create_test_image(tmp_fn, size_width=img_size, size_height=img_size)\n image_buffer = io.BytesIO()\n img = Image.open(tmp_fn)\n img.save(image_buffer, format='PNG')\n image_buffer.seek(0)\n test_docs = crafter.craft(np.stack([image_buffer.getvalue(), image_buffer.getvalue()]), [None, None])\n assert len(test_docs) == 2\n for test_doc in test_docs:\n assert test_doc['blob'].shape == (img_size, img_size, 3)\n np.testing.assert_almost_equal(test_doc['blob'], np.array(img).astype('float32'))\n",
"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport warnings\nfrom typing import Union, Tuple, Dict, Iterable\n\nimport numpy as np\n\nfrom jina.executors.decorators import single\nfrom jina.executors.crafters import BaseCrafter\n\nfrom .helper import _load_image, _move_channel_axis, _resize_short\n\n\nclass ImageResizer(BaseCrafter):\n \"\"\"\n Resize image to the given size.\n\n :param target_size: Desired output size.\n If size is a sequence like (h, w), the output size will\n be matched to this. If size is an int, the smaller edge\n of the image will be matched to this number maintain\n the aspect ratio.\n :param how: The interpolation method. Valid values include\n `NEAREST`, `BILINEAR`, `BICUBIC`, and `LANCZOS`.\n Default is `BILINEAR`. Please refer to `PIL.Image` for details.\n \"\"\"\n\n def __init__(self,\n target_size: Union[Iterable[int], int] = 224,\n how: str = 'BILINEAR',\n channel_axis: int = -1,\n *args, **kwargs):\n warnings.warn(f'{self!r} will be retired soon, you can add `- !URI2Blob {{}}` to the drivers', DeprecationWarning)\n super().__init__(*args, **kwargs)\n if isinstance(target_size, int):\n self.output_dim = target_size\n elif isinstance(target_size, Iterable):\n self.output_dim = tuple(target_size)\n else:\n raise ValueError(f'output_dim {target_size} should be an integer or tuple/list of 2 integers')\n self.how = how\n self.channel_axis = channel_axis\n\n @single\n def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:\n \"\"\"\n Resize the image array to the given size.\n\n :param blob: The ndarray of the image\n :return: A dict with the cropped image\n \"\"\"\n raw_img = _load_image(blob, self.channel_axis)\n _img = _resize_short(raw_img, self.output_dim, self.how)\n img = _move_channel_axis(np.asarray(_img), -1, self.channel_axis)\n return dict(offset=0, blob=img.astype('float32'))\n",
"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nfrom jina.executors.encoders.numeric import TransformEncoder\n\nclass IncrementalPCAEncoder(TransformEncoder):\n \"\"\"\n Encodes data from an ndarray in size `B x T` into an ndarray in size `B x D`.\n\n Where `B` is the batch's size and `T` and `D` are the dimensions pre (`T`)\n and after (`D`) the encoding.\n\n :param output_dim: the output size.\n :param num_features: the number of input features.\n If ``num_features`` is None, then ``num_features``\n is inferred from the data\n :param whiten: If whiten is false, the data is already considered to be whitened,\n and no whitening is performed.\n :param args: Additional positional arguments\n :param kwargs: Additional keyword arguments\n\n More details can be found\n `here <https://scikit-learn.org/stable/auto_examples/decomposition/plot_incremental_pca.html>`_\n\n .. note::\n :class:`IncrementalPCAEncoder` must be trained before calling ``encode()``.\n This encoder can be trained in an incremental way.\n \"\"\"\n\n def __init__(self, num_features: int = None, whiten: bool = False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.whiten = whiten\n self.num_features = num_features\n self.is_trained = False\n self.model = None\n\n def post_init(self):\n \"\"\"Load IncrementalPCA model\"\"\"\n super().post_init()\n if not self.model:\n from sklearn.decomposition import IncrementalPCA\n self.model = IncrementalPCA(\n n_components=self.output_dim,\n whiten=self.whiten)",
"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nfrom typing import Dict\n\nimport numpy as np\n\nfrom jina.executors.decorators import single\nfrom jina.executors.crafters import BaseCrafter\n\nfrom .helper import _crop_image, _move_channel_axis, _load_image\n\n\nclass ImageCropper(BaseCrafter):\n \"\"\"\n Crop the image with the specific crop box.\n\n The coordinate is the same coordinate-system in\n the :py:mode:`PIL.Image`.\n\n :param top: the vertical coordinate of the top left\n corner of the crop box.\n :param left: the horizontal coordinate of the top\n left corner of the crop box.\n :param height: the height of the crop box.\n :param width: the width of the crop box.\n :param channel_axis: the axis referring to the channels\n :param args: Additional positional arguments\n :param kwargs: Additional keyword arguments\n \"\"\"\n\n def __init__(self, top: int = 0, left: int = 0, height: int = 224, width: int = 224, channel_axis: int = -1, *args, **kwargs):\n \"\"\"Set Constructor.\"\"\"\n super().__init__(*args, **kwargs)\n self.top = top\n self.left = left\n self.height = height\n self.width = width\n self.channel_axis = channel_axis\n\n @single\n def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:\n \"\"\"\n Crop the input image array.\n\n :param blob: The ndarray of the image\n :return: A dict with the cropped image\n \"\"\"\n raw_img = _load_image(blob, self.channel_axis)\n _img, top, left = _crop_image(raw_img, target_size=(self.height, self.width), top=self.top, left=self.left)\n img = _move_channel_axis(np.asarray(_img), -1, self.channel_axis)\n return dict(offset=0, blob=img.astype('float32'), location=(top, left))\n\n",
"from typing import Tuple, Dict, List, Union\n\nimport numpy as np\n\nfrom jina.executors.decorators import single\nfrom jina.executors.segmenters import BaseSegmenter\n\nfrom .helper import _move_channel_axis\n\n\nclass SlidingWindowImageCropper(BaseSegmenter):\n \"\"\"\n :class:`SlidingWindowImageCropper` crops the image with a sliding window.\n\n :param target_size: desired output size. If size is a sequence like (h, w),\n the output size will be matched to this.\n If size is an int, the output will have the\n same height and width as the `target_size`.\n :param strides: the strides between two neighboring sliding windows.\n `strides` is a sequence like (h, w),\n in which denote the strides on the vertical\n and the horizontal axis.\n :param padding: If False, only patches which are\n fully contained in the input image are included.\n If True, all patches whose starting point\n is inside the input are included,\n and areas outside the input default to zero.\n The `padding` argument has no effect on the size of each patch,\n it determines how many patches are extracted.\n Default is False.\n :param args: Additional positional arguments\n :param kwargs: Additional keyword arguments\n \"\"\"\n\n def __init__(self,\n target_size: Union[Tuple[int], int] = 16,\n strides: Tuple[int, int] = (2, 2),\n padding: bool = False,\n channel_axis: int = -1,\n *args,\n **kwargs):\n \"\"\"Set constructor\"\"\"\n super().__init__(*args, **kwargs)\n self.target_size = target_size\n if len(strides) != 2:\n raise ValueError(f'strides should be a tuple of two integers: {strides}')\n self.stride_h, self.stride_w = strides\n self.padding = padding\n self.channel_axis = channel_axis\n\n def _add_zero_padding(self, img: 'np.ndarray') -> 'np.ndarray':\n h, w, c = img.shape\n ext_h = self.target_size - h % self.stride_h\n ext_w = self.target_size - w % self.stride_w\n return np.pad(img,\n ((0, ext_h), (0, ext_w), (0, 0)),\n mode='constant',\n constant_values=0)\n\n @single\n def 
segment(self, blob: 'np.ndarray', *args, **kwargs) -> List[Dict]:\n \"\"\"\n Crop the input image array with a sliding window.\n\n :param blob: the ndarray of the image with the color channel at the last axis\n :return: a list of chunk dicts with the cropped images.\n :param args: Additional positional arguments\n :param kwargs: Additional keyword arguments\n \"\"\"\n raw_img = np.copy(blob)\n raw_img = _move_channel_axis(raw_img, self.channel_axis)\n if self.padding:\n raw_img = self._add_zero_padding(blob)\n h, w, c = raw_img.shape\n row_step = raw_img.strides[0]\n col_step = raw_img.strides[1]\n\n expanded_img = np.lib.stride_tricks.as_strided(\n raw_img,\n shape=(\n 1 + int((h - self.target_size) / self.stride_h),\n 1 + int((w - self.target_size) / self.stride_w),\n self.target_size,\n self.target_size,\n c\n ),\n strides=(\n row_step * self.stride_h,\n col_step * self.stride_w,\n row_step,\n col_step,\n 1), writeable=False)\n\n bbox_locations = [\n (h * self.stride_h, w * self.stride_w)\n for h in range(expanded_img.shape[0])\n for w in range(expanded_img.shape[1])]\n\n expanded_img = expanded_img.reshape((-1, self.target_size, self.target_size, c))\n\n results = []\n for location, _blob in zip(bbox_locations, expanded_img):\n blob = _move_channel_axis(_blob, -1, self.channel_axis)\n results.append(dict(offset=0, weight=1.0, blob=blob.astype('float32'), location=location))\n return results\n",
"import numpy as np\n\nfrom .. import AudioSlicer\n\n\ndef test_slice_mono():\n n_frames = 100\n frame_length = 2048\n signal_orig = np.random.randn(frame_length * n_frames)\n\n segmenter = AudioSlicer(frame_length, frame_length)\n\n segmented_chunks_per_doc = segmenter.segment(np.stack([signal_orig, signal_orig]))\n assert len(segmented_chunks_per_doc) == 2\n for segmented_chunk in segmented_chunks_per_doc:\n assert len(segmented_chunk) == n_frames\n\n\ndef test_slice_stereo():\n n_frames = 100\n frame_length = 2048\n signal_orig = np.random.randn(2, frame_length * n_frames)\n\n segmenter = AudioSlicer(frame_length, frame_length)\n segmented_chunks_per_doc = segmenter.segment(np.stack([signal_orig, signal_orig]))\n assert len(segmented_chunks_per_doc) == 2\n for segmented_chunk in segmented_chunks_per_doc:\n assert len(segmented_chunk) == n_frames * 2\n"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.stack"
],
[
"numpy.asarray"
],
[
"sklearn.decomposition.IncrementalPCA"
],
[
"numpy.asarray"
],
[
"numpy.copy",
"numpy.pad"
],
[
"numpy.random.randn",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qiguming/mmdetection | [
"68532eb6f4643ddf0179a4384c8c9e004a2c1d07",
"84fbb2c6ee7346ea722cea3a4fa16d73e11fcafd",
"84fbb2c6ee7346ea722cea3a4fa16d73e11fcafd"
] | [
"mmdet/ops/point_sample.py",
"mmdet/models/roi_heads/dynamic_roi_head.py",
"mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py"
] | [
"# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair\n\n\ndef normalize(grid):\n \"\"\"Normalize input grid from [-1, 1] to [0, 1]\n\n Args:\n grid (Tensor): The grid to be normalize, range [-1, 1].\n\n Returns:\n Tensor: Normalized grid, range [0, 1].\n \"\"\"\n\n return (grid + 1.0) / 2.0\n\n\ndef denormalize(grid):\n \"\"\"Denormalize input grid from range [0, 1] to [-1, 1]\n Args:\n grid (Tensor): The grid to be denormalize, range [0, 1].\n\n Returns:\n Tensor: Denormalized grid, range [-1, 1].\n \"\"\"\n\n return grid * 2.0 - 1.0\n\n\ndef generate_grid(num_grid, size, device):\n \"\"\"Generate regular square grid of points in [0, 1] x [0, 1] coordinate\n space.\n\n Args:\n num_grid (int): The number of grids to sample, one for each region.\n size (tuple(int, int)): The side size of the regular grid.\n device (torch.device): Desired device of returned tensor.\n\n Returns:\n (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that\n contains coordinates for the regular grids.\n \"\"\"\n\n affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)\n grid = F.affine_grid(\n affine_trans, torch.Size((1, 1, *size)), align_corners=False)\n grid = normalize(grid)\n return grid.view(1, -1, 2).expand(num_grid, -1, -1)\n\n\ndef rel_roi_point_to_abs_img_point(rois, rel_roi_points):\n \"\"\"Convert roi based relative point coordinates to image based absolute\n point coordinates.\n\n Args:\n rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)\n rel_roi_points (Tensor): Point coordinates inside RoI, relative to\n RoI, location, range (0, 1), shape (N, P, 2)\n\n Returns:\n Tensor: Image based absolute point coordinates, shape (N, P, 2)\n \"\"\"\n\n with torch.no_grad():\n assert rel_roi_points.size(0) == rois.size(0)\n assert rois.dim() == 2\n assert rel_roi_points.dim() == 3\n assert 
rel_roi_points.size(2) == 2\n # remove batch idx\n if rois.size(1) == 5:\n rois = rois[:, 1:]\n abs_img_points = rel_roi_points.clone()\n abs_img_points[:, :, 0] = abs_img_points[:, :, 0] * (\n rois[:, None, 2] - rois[:, None, 0])\n abs_img_points[:, :, 1] = abs_img_points[:, :, 1] * (\n rois[:, None, 3] - rois[:, None, 1])\n abs_img_points[:, :, 0] += rois[:, None, 0]\n abs_img_points[:, :, 1] += rois[:, None, 1]\n return abs_img_points\n\n\ndef abs_img_point_to_rel_img_point(abs_img_points,\n img_shape,\n spatial_scale=1.):\n \"\"\"Convert image based absolute point coordinates to image based relative\n coordinates for sampling.\n\n Args:\n abs_img_points (Tensor): Image based absolute point coordinates,\n shape (N, P, 2)\n img_shape (tuple): (height, width) of image or feature map.\n spatial_scale (float): Scale points by this factor. Default: 1.\n\n Returns:\n Tensor: Image based relative point coordinates for sampling,\n shape (N, P, 2)\n \"\"\"\n\n assert isinstance(img_shape, tuple) and len(img_shape) == 2\n h, w = img_shape\n scale = torch.tensor([w, h],\n dtype=torch.float,\n device=abs_img_points.device)\n scale = scale.view(1, 1, 2)\n rel_img_points = abs_img_points / scale * spatial_scale\n\n return rel_img_points\n\n\ndef rel_roi_point_to_rel_img_point(rois,\n rel_roi_points,\n img_shape,\n spatial_scale=1.):\n \"\"\"Convert roi based relative point coordinates to image based absolute\n point coordinates.\n\n Args:\n rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)\n rel_roi_points (Tensor): Point coordinates inside RoI, relative to\n RoI, location, range (0, 1), shape (N, P, 2)\n img_shape (tuple): (height, width) of image or feature map.\n spatial_scale (float): Scale points by this factor. 
Default: 1.\n\n Returns:\n Tensor: Image based relative point coordinates for sampling,\n shape (N, P, 2)\n \"\"\"\n\n abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points)\n rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img_shape,\n spatial_scale)\n\n return rel_img_point\n\n\ndef point_sample(input, points, align_corners=False, **kwargs):\n \"\"\"A wrapper around :function:`grid_sample` to support 3D point_coords\n tensors Unlike :function:`torch.nn.functional.grid_sample` it assumes\n point_coords to lie inside [0, 1] x [0, 1] square.\n\n Args:\n input (Tensor): Feature map, shape (N, C, H, W).\n points (Tensor): Image based absolute point coordinates (normalized),\n range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).\n align_corners (bool): Whether align_corners. Default: False\n\n Returns:\n Tensor: Features of `point` on `input`, shape (N, C, P) or\n (N, C, Hgrid, Wgrid).\n \"\"\"\n\n add_dim = False\n if points.dim() == 3:\n add_dim = True\n points = points.unsqueeze(2)\n output = F.grid_sample(\n input, denormalize(points), align_corners=align_corners, **kwargs)\n if add_dim:\n output = output.squeeze(3)\n return output\n\n\nclass SimpleRoIAlign(nn.Module):\n\n def __init__(self, out_size, spatial_scale, aligned=True):\n \"\"\"Simple RoI align in PointRend, faster than standard RoIAlign.\n\n Args:\n out_size (tuple[int]): h, w\n spatial_scale (float): scale the input boxes by this number\n aligned (bool): if False, use the legacy implementation in\n MMDetection, align_corners=True will be used in F.grid_sample.\n If True, align the results more perfectly.\n \"\"\"\n\n super(SimpleRoIAlign, self).__init__()\n self.out_size = _pair(out_size)\n self.spatial_scale = float(spatial_scale)\n # to be consistent with other RoI ops\n self.use_torchvision = False\n self.aligned = aligned\n\n def forward(self, features, rois):\n\n num_imgs = features.size(0)\n num_rois = rois.size(0)\n rel_roi_points = generate_grid(\n 
num_rois, self.out_size, device=rois.device)\n\n point_feats = []\n for batch_ind in range(num_imgs):\n # unravel batch dim\n feat = features[batch_ind].unsqueeze(0)\n inds = (rois[:, 0].long() == batch_ind)\n if inds.any():\n rel_img_points = rel_roi_point_to_rel_img_point(\n rois[inds], rel_roi_points[inds], feat.shape[2:],\n self.spatial_scale).unsqueeze(0)\n point_feat = point_sample(\n feat, rel_img_points, align_corners=not self.aligned)\n point_feat = point_feat.squeeze(0).transpose(0, 1)\n point_feats.append(point_feat)\n\n channels = features.size(1)\n roi_feats = torch.cat(point_feats, dim=0)\n roi_feats = roi_feats.reshape(num_rois, channels, *self.out_size)\n\n return roi_feats\n\n def __repr__(self):\n format_str = self.__class__.__name__\n format_str += '(out_size={}, spatial_scale={}'.format(\n self.out_size, self.spatial_scale)\n return format_str\n",
"import numpy as np\nimport torch\n\nfrom mmdet.core import bbox2roi\nfrom mmdet.models.losses import SmoothL1Loss\nfrom ..builder import HEADS\nfrom .standard_roi_head import StandardRoIHead\n\n\[email protected]_module()\nclass DynamicRoIHead(StandardRoIHead):\n \"\"\"RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_.\"\"\"\n\n def __init__(self, **kwargs):\n super(DynamicRoIHead, self).__init__(**kwargs)\n assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)\n # the IoU history of the past `update_iter_interval` iterations\n self.iou_history = []\n # the beta history of the past `update_iter_interval` iterations\n self.beta_history = []\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n proposals (list[Tensors]): list of region proposals.\n\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n cur_iou = []\n for i in range(num_imgs):\n assign_result = 
self.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n # record the `iou_topk`-th largest IoU in an image\n iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,\n len(assign_result.max_overlaps))\n ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)\n cur_iou.append(ious[-1].item())\n sampling_results.append(sampling_result)\n # average the current IoUs over images\n cur_iou = np.mean(cur_iou)\n self.iou_history.append(cur_iou)\n\n losses = dict()\n # bbox head forward and loss\n if self.with_bbox:\n bbox_results = self._bbox_forward_train(x, sampling_results,\n gt_bboxes, gt_labels,\n img_metas)\n losses.update(bbox_results['loss_bbox'])\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(x, sampling_results,\n bbox_results['bbox_feats'],\n gt_masks, img_metas)\n # TODO: Support empty tensor input. 
#2280\n if mask_results['loss_mask'] is not None:\n losses.update(mask_results['loss_mask'])\n\n # update IoU threshold and SmoothL1 beta\n update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval\n if len(self.iou_history) % update_iter_interval == 0:\n new_iou_thr, new_beta = self.update_hyperparameters()\n\n return losses\n\n def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,\n img_metas):\n num_imgs = len(img_metas)\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(x, rois)\n\n bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels, self.train_cfg)\n # record the `beta_topk`-th smallest target\n # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets\n # and bbox_weights, respectively\n pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)\n num_pos = len(pos_inds)\n cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)\n beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,\n num_pos)\n cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()\n self.beta_history.append(cur_target)\n loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(loss_bbox=loss_bbox)\n return bbox_results\n\n def update_hyperparameters(self):\n \"\"\"\n Update hyperparameters like `iou_thr` and `SmoothL1 beta` based\n on the training statistics.\n\n Returns:\n tuple[float]: the updated `iou_thr` and `SmoothL1 beta`\n \"\"\"\n new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,\n np.mean(self.iou_history))\n self.iou_history = []\n self.bbox_assigner.pos_iou_thr = new_iou_thr\n self.bbox_assigner.neg_iou_thr = new_iou_thr\n self.bbox_assigner.min_pos_iou = new_iou_thr\n new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,\n np.median(self.beta_history))\n self.beta_history = []\n self.bbox_head.loss_bbox.beta = new_beta\n return new_iou_thr, new_beta\n",
"import torch\n\nfrom mmdet.core import force_fp32\nfrom mmdet.models.builder import ROI_EXTRACTORS\nfrom .base_roi_extractor import BaseRoIExtractor\n\n\n@ROI_EXTRACTORS.register_module()\nclass SingleRoIExtractor(BaseRoIExtractor):\n \"\"\"Extract RoI features from a single level feature map.\n\n If there are multiple input feature levels, each RoI is mapped to a level\n according to its scale. The mapping rule is proposed in\n `FPN <https://arxiv.org/abs/1612.03144>`_.\n\n Args:\n roi_layer (dict): Specify RoI layer type and arguments.\n out_channels (int): Output channels of RoI layers.\n featmap_strides (int): Strides of input feature maps.\n finest_scale (int): Scale threshold of mapping to level 0. Default: 56.\n \"\"\"\n\n def __init__(self,\n roi_layer,\n out_channels,\n featmap_strides,\n finest_scale=56):\n super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,\n featmap_strides)\n self.finest_scale = finest_scale\n\n def map_roi_levels(self, rois, num_levels):\n \"\"\"Map rois to corresponding feature levels by scales.\n\n - scale < finest_scale * 2: level 0\n - finest_scale * 2 <= scale < finest_scale * 4: level 1\n - finest_scale * 4 <= scale < finest_scale * 8: level 2\n - scale >= finest_scale * 8: level 3\n\n Args:\n rois (Tensor): Input RoIs, shape (k, 5).\n num_levels (int): Total level number.\n\n Returns:\n Tensor: Level index (0-based) of each RoI, shape (k, )\n \"\"\"\n scale = torch.sqrt(\n (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))\n target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))\n target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()\n return target_lvls\n\n @force_fp32(apply_to=('feats', ), out_fp16=True)\n def forward(self, feats, rois, roi_scale_factor=None):\n \"\"\"Forward function\"\"\"\n out_size = self.roi_layers[0].out_size\n num_levels = len(feats)\n roi_feats = feats[0].new_zeros(\n rois.size(0), self.out_channels, *out_size)\n\n if num_levels == 1:\n if len(rois) == 
0:\n return roi_feats\n return self.roi_layers[0](feats[0], rois)\n\n target_lvls = self.map_roi_levels(rois, num_levels)\n if roi_scale_factor is not None:\n rois = self.roi_rescale(rois, roi_scale_factor)\n for i in range(num_levels):\n inds = target_lvls == i\n if inds.any():\n rois_ = rois[inds, :]\n roi_feats_t = self.roi_layers[i](feats[i], rois_)\n roi_feats[inds] = roi_feats_t\n else:\n roi_feats += sum(x.view(-1)[0] for x in self.parameters()) * 0.\n return roi_feats\n"
] | [
[
"torch.Size",
"torch.cat",
"torch.tensor",
"torch.no_grad",
"torch.nn.modules.utils._pair"
],
[
"torch.topk",
"numpy.median",
"numpy.mean",
"torch.kthvalue"
],
[
"torch.sqrt",
"torch.log2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dylanhross/dmccs | [
"8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3",
"8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3"
] | [
"prediction/bimod/qglc/cactus_ccs_comp.py",
"prediction/bimod/BACs_and_TERF/C06_featurize.py"
] | [
"#!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3\n\"\"\"\n\"\"\"\n\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# set up plot fonts\nfrom matplotlib import rcParams\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Helvetica', 'Arial']\nrcParams['font.size'] = 11\n\n\n# common settings for the same style across plots\nf_size = (3.5, 4)\nbs = {\n 'linewidth': 1., 'align': 'center', 'width': 0.75, 'capstyle': 'round', 'capsize': 6, \n 'error_kw': {\n 'elinewidth': 1., 'ecolor': 'k'\n }\n}\n\"\"\"\nbs = {\n 'fill': False, 'linewidth': 2, 'align': 'center', 'width': 0.8, 'capstyle': 'round', 'capsize': 6, 'hatch': '//'\n}\n\"\"\"\n\n\nfig = plt.figure(figsize=f_size)\n\nax = fig.add_subplot(111)\nax.axhline(202.8, c='k', ls=':', lw=1.5)\nax.axhline(209.5, c='k', ls=':', lw=1.5)\n\nlabels = ['7', '5', '3', \"3'\", \"4'\"]\nx = [3, 4, 5, 2, 1]\nccs = np.loadtxt('cactus_y.txt')\nec = ['orchid', 'yellow', 'lightpink', 'royalblue', 'darkorange']\n\nfor x_, ccs_, ec_ in zip(x, ccs, ec):\n ax.bar(x_, ccs_, edgecolor=ec_, ecolor=ec_, color=ec_, fill=True, **bs)\n ax.bar(x_, ccs_, fill=False, **bs)\n\nax.set_xticks([1, 2, 3, 4, 5])\nax.set_xticklabels(labels, fontstyle='italic', fontsize=14)\n\n\n\n#ax.set_xlim([150, 400])\nax.set_ylim([195, 215])\n\n\nfor d in ['top', 'right']:\n ax.spines[d].set_visible(False)\nax.set_ylabel(r'CCS ($\\AA^2$)')\n#ax.set_xlabel('m/z')\n\n# save figure\npng = 'qglc_cactus_comp.png'\nplt.savefig(png, dpi=400, bbox_inches='tight')\n\nplt.tight_layout()\n#plt.show()\n#plt.close()\n\n",
"#!/usr/local/Cellar/[email protected]/3.9.1_6/bin/python3 \n\"\"\"\n\n\"\"\"\n\nfrom pickle import load as pload\nfrom numpy import savetxt\nimport sys\n\n#from DmimData.data import DMD\nfrom helpers import featurize\n\n\ndef main():\n \"\"\" main execution sequence \"\"\"\n n = 5\n smis = ['C[N+](C)(C)CCCCCC' for _ in range(n)]\n structures = []\n for i in range(1, n + 1):\n fname = 'C06_c{}.out.mfj.xyzmq'.format(i)\n with open(fname, 'r') as f:\n structures.append(f.read())\n\n X_cust = featurize(smis, structures, ['hac', 'c', 'adb', 'asv', 'ctv', 'hbam', 'hbd'], ['pmi1', 'pmi2', 'pmi3', 'rmd02'])\n X_mqn = featurize(smis, structures, 'all', [])\n X_md3d = featurize(smis, structures, [], 'all')\n X_comb = featurize(smis, structures, 'all', 'all')\n\n savetxt('C06_X_CUST.txt', X_cust)\n savetxt('C06_X_MQN.txt', X_mqn)\n savetxt('C06_X_MD3D.txt', X_md3d)\n savetxt('C06_X_COMB.txt', X_comb)\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
],
[
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Xudewang/scarlet | [
"1d2a1806038cda8ac96e4c766a5cfa0b8ae5c1b7"
] | [
"scarlet/display.py"
] | [
"import numpy as np\nfrom astropy.visualization.lupton_rgb import LinearMapping, AsinhMapping\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle, Polygon\nfrom matplotlib.ticker import MaxNLocator\nfrom .bbox import Box\nfrom .component import Component\n\n\ndef channels_to_rgb(channels):\n \"\"\"Get the linear mapping of multiple channels to RGB channels\n The mapping created here assumes the the channels are ordered in wavelength\n direction, starting with the shortest wavelength. The mapping seeks to produce\n a relatively even weights for across all channels. It does not consider e.g.\n signal-to-noise variations across channels or human perception.\n Parameters\n ----------\n channels: int in range(0,7)\n Number of channels\n Returns\n -------\n array (3, channels) to map onto RGB\n \"\"\"\n assert channels in range(\n 0, 8\n ), \"No mapping has been implemented for more than {} channels\".format(channels)\n\n channel_map = np.zeros((3, channels))\n if channels == 1:\n channel_map[0, 0] = channel_map[1, 0] = channel_map[2, 0] = 1\n elif channels == 2:\n channel_map[0, 1] = 0.667\n channel_map[1, 1] = 0.333\n channel_map[1, 0] = 0.333\n channel_map[2, 0] = 0.667\n channel_map /= 0.667\n elif channels == 3:\n channel_map[0, 2] = 1\n channel_map[1, 1] = 1\n channel_map[2, 0] = 1\n elif channels == 4:\n channel_map[0, 3] = 1\n channel_map[0, 2] = 0.333\n channel_map[1, 2] = 0.667\n channel_map[1, 1] = 0.667\n channel_map[2, 1] = 0.333\n channel_map[2, 0] = 1\n channel_map /= 1.333\n elif channels == 5:\n channel_map[0, 4] = 1\n channel_map[0, 3] = 0.667\n channel_map[1, 3] = 0.333\n channel_map[1, 2] = 1\n channel_map[1, 1] = 0.333\n channel_map[2, 1] = 0.667\n channel_map[2, 0] = 1\n channel_map /= 1.667\n elif channels == 6:\n channel_map[0, 5] = 1\n channel_map[0, 4] = 0.667\n channel_map[0, 3] = 0.333\n channel_map[1, 4] = 0.333\n channel_map[1, 3] = 0.667\n channel_map[1, 2] = 0.667\n channel_map[1, 1] = 0.333\n channel_map[2, 2] = 
0.333\n channel_map[2, 1] = 0.667\n channel_map[2, 0] = 1\n channel_map /= 2\n elif channels == 7:\n channel_map[:, 6] = 2/3.\n channel_map[0, 5] = 1\n channel_map[0, 4] = 0.667\n channel_map[0, 3] = 0.333\n channel_map[1, 4] = 0.333\n channel_map[1, 3] = 0.667\n channel_map[1, 2] = 0.667\n channel_map[1, 1] = 0.333\n channel_map[2, 2] = 0.333\n channel_map[2, 1] = 0.667\n channel_map[2, 0] = 1\n channel_map /= 2\n return channel_map\n\n\nclass LinearPercentileNorm(LinearMapping):\n def __init__(self, img, percentiles=[1, 99]):\n \"\"\"Create norm that is linear between lower and upper percentile of img\n Parameters\n ----------\n img: array_like\n Image to normalize\n percentile: array_like, default=[1,99]\n Lower and upper percentile to consider. Pixel values below will be\n set to zero, above to saturated.\n \"\"\"\n assert len(percentiles) == 2\n vmin, vmax = np.percentile(img, percentiles)\n super().__init__(minimum=vmin, maximum=vmax)\n\n\nclass AsinhPercentileNorm(AsinhMapping):\n def __init__(self, img, percentiles=[1, 99]):\n \"\"\"Create norm that is linear between lower and upper percentile of img\n Parameters\n ----------\n img: array_like\n Image to normalize\n percentile: array_like, default=[1,99]\n Lower and upper percentile to consider. 
Pixel values below will be\n set to zero, above to saturated.\n \"\"\"\n assert len(percentiles) == 2\n vmin, vmax = np.percentile(img, percentiles)\n # solution for beta assumes flat spectrum at vmax\n stretch = vmax - vmin\n beta = stretch / np.sinh(1)\n super().__init__(minimum=vmin, stretch=stretch, Q=beta)\n\n\ndef img_to_3channel(img, channel_map=None, fill_value=0):\n \"\"\"Convert multi-band image cube into 3 RGB channels\n Parameters\n ----------\n img: array_like\n This should be an array with dimensions (channels, height, width).\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n fill_value: float, default=`0`\n Value to use for any masked pixels.\n Returns\n -------\n RGB: numpy array with dtype float\n \"\"\"\n # expand single img into cube\n assert len(img.shape) in [2, 3]\n if len(img.shape) == 2:\n ny, nx = img.shape\n img_ = img.reshape(1, ny, nx)\n elif len(img.shape) == 3:\n img_ = img\n C = len(img_)\n\n # filterWeights: channel x band\n if channel_map is None:\n channel_map = channels_to_rgb(C)\n else:\n assert channel_map.shape == (3, len(img))\n\n # map channels onto RGB channels\n _, ny, nx = img_.shape\n rgb = np.dot(channel_map, img_.reshape(C, -1)).reshape(3, ny, nx)\n\n if hasattr(rgb, \"mask\"):\n rgb = rgb.filled(fill_value)\n\n return rgb\n\n\ndef img_to_rgb(img, channel_map=None, fill_value=0, norm=None, mask=None):\n \"\"\"Convert images to normalized RGB.\n If normalized values are outside of the range [0..255], they will be\n truncated such as to preserve the corresponding color.\n Parameters\n ----------\n img: array_like\n This should be an array with dimensions (channels, height, width).\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n fill_value: float, default=`0`\n Value to use for any masked pixels.\n norm: `scarlet.display.Norm`, default `None`\n Norm to use for mapping in the allowed range [0..255]. 
If `norm=None`,\n `scarlet.display.LinearPercentileNorm` will be used.\n mask: array_like\n A [0,1] binary mask to apply over the top of the image,\n where pixels with mask==1 are masked out.\n Returns\n -------\n rgb: numpy array with dimensions (3, height, width) and dtype uint8\n \"\"\"\n RGB = img_to_3channel(img, channel_map=channel_map)\n if norm is None:\n norm = LinearMapping(image=RGB)\n rgb = norm.make_rgb_image(*RGB)\n if mask is not None:\n rgb = np.dstack([rgb, ~mask * 255])\n return rgb\n\n\npanel_size = 4.0\n\n\ndef show_likelihood(blend, figsize=None, **kwargs):\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax.plot(blend.log_likelihood, **kwargs)\n ax.set_xlabel(\"Iteration\")\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_ylabel(\"log-Likelihood\")\n return fig\n\n\ndef show_observation(\n observation,\n norm=None,\n channel_map=None,\n sky_coords=None,\n show_psf=False,\n add_labels=True,\n figsize=None,\n):\n \"\"\"Plot observation in standardized form.\n \"\"\"\n panels = 1 if show_psf is False else 2\n if figsize is None:\n figsize = (panel_size * panels, panel_size)\n fig, ax = plt.subplots(1, panels, figsize=figsize)\n if not hasattr(ax, \"__iter__\"):\n ax = (ax,)\n\n # Mask any pixels with zero weight in all bands\n mask = np.sum(observation.weights, axis=0) == 0\n # if there are no masked pixels, do not use a mask\n if np.all(mask == 0):\n mask = None\n\n panel = 0\n extent = get_extent(observation.bbox)\n ax[panel].imshow(\n img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Observation\")\n\n if add_labels:\n assert sky_coords is not None, \"Provide sky_coords for labeled objects\"\n\n for k, center in enumerate(sky_coords):\n center_ = observation.get_pixel(center)\n color = \"w\" if observation.C > 1 else \"r\"\n ax[panel].text(*center_[::-1], k, color=color, ha=\"center\", va=\"center\")\n\n panel += 1\n if show_psf:\n 
psf_image = np.zeros(observation.data.shape)\n\n if observation.psf is not None:\n psf_model = observation.psf.get_model()\n # make PSF as bright as the brightest pixel of the observation\n psf_model *= (\n observation.data.mean(axis=0).max() / psf_model.mean(axis=0).max()\n )\n # insert into middle of \"blank\" observation\n full_box = Box(psf_image.shape)\n shift = tuple(\n psf_image.shape[c] // 2 - psf_model.shape[c] // 2\n for c in range(full_box.D)\n )\n model_box = Box(psf_model.shape) + shift\n model_box.insert_into(psf_image, psf_model)\n # slices = scarlet.box.overlapped_slices\n ax[panel].imshow(img_to_rgb(psf_image, norm=norm), origin=\"lower\")\n ax[panel].set_title(\"PSF\")\n\n fig.tight_layout()\n return fig\n\n\ndef show_scene(\n sources,\n observation=None,\n norm=None,\n channel_map=None,\n show_model=True,\n show_observed=False,\n show_rendered=False,\n show_residual=False,\n add_labels=True,\n add_boxes=False,\n figsize=None,\n linear=True,\n):\n \"\"\"Plot all sources to recreate the scence.\n The functions provides a fast way of evaluating the quality of the entire model,\n i.e. 
the combination of all scences that seek to fit the observation.\n Parameters\n ----------\n sources: list of source models\n observation: `~scarlet.Observation`\n norm: norm to compress image intensity to the range [0,255]\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n show_model: bool\n Whether the model is shown in the model frame\n show_observed: bool\n Whether the observation is shown\n show_rendered: bool\n Whether the model, rendered to match the observation, is shown\n show_residual: bool\n Whether the residuals between rendered model and observation is shown\n add_label: bool\n Whether each source is labeled with its numerical index in the source list\n add_boxes: bool\n Whether each source box is shown\n figsize: matplotlib figsize argument\n linear: bool\n Whether or not to display the scene in a single line (`True`) or\n on multiple lines (`False`).\n Returns\n -------\n matplotlib figure\n \"\"\"\n if show_observed or show_rendered or show_residual:\n assert (\n observation is not None\n ), \"Provide matched observation to show observed frame\"\n\n panels = sum((show_model, show_observed, show_rendered, show_residual))\n if linear:\n if figsize is None:\n figsize = (panel_size * panels, panel_size)\n fig, ax = plt.subplots(1, panels, figsize=figsize)\n else:\n columns = int(np.ceil(panels / 2))\n if figsize is None:\n figsize = (panel_size * columns, panel_size * 2)\n fig = plt.figure(figsize=figsize)\n ax = [fig.add_subplot(2, columns, n + 1) for n in range(panels)]\n if not hasattr(ax, \"__iter__\"):\n ax = (ax,)\n\n # Mask any pixels with zero weight in all bands\n if observation is not None:\n mask = np.sum(observation.weights, axis=0) == 0\n # if there are no masked pixels, do not use a mask\n if np.all(mask == 0):\n mask = None\n\n model_frame = sources[0].frame\n model = np.zeros(model_frame.shape)\n for src in sources:\n model += src.get_model(frame=model_frame)\n\n panel = 0\n if show_model:\n extent = 
get_extent(model_frame.bbox)\n ax[panel].imshow(\n img_to_rgb(model, norm=norm, channel_map=channel_map),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Model\")\n panel += 1\n\n if show_rendered or show_residual:\n model = observation.render(model)\n extent = get_extent(observation.bbox)\n\n if show_rendered:\n ax[panel].imshow(\n img_to_rgb(model, norm=norm, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Model Rendered\")\n panel += 1\n\n if show_observed:\n ax[panel].imshow(\n img_to_rgb(observation.data, norm=norm, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Observation\")\n panel += 1\n\n if show_residual:\n residual = observation.data - model\n norm_ = LinearPercentileNorm(residual)\n ax[panel].imshow(\n img_to_rgb(residual, norm=norm_, channel_map=channel_map, mask=mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[panel].set_title(\"Residual\")\n panel += 1\n\n for k, src in enumerate(sources):\n if add_boxes:\n panel = 0\n box_kwargs = {\"facecolor\": \"none\", \"edgecolor\": \"w\", \"lw\": 0.5}\n if show_model:\n extent = get_extent(src.bbox)\n rect = Rectangle(\n (extent[0], extent[2]),\n extent[1] - extent[0],\n extent[3] - extent[2],\n **box_kwargs\n )\n ax[panel].add_artist(rect)\n panel = 1\n if observation is not None:\n start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]\n points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))\n coords = [\n observation.get_pixel(model_frame.get_sky_coord(p)) for p in points\n ]\n for panel in range(panel, panels):\n poly = Polygon(coords, closed=True, **box_kwargs)\n ax[panel].add_artist(poly)\n\n if add_labels and hasattr(src, \"center\") and src.center is not None:\n center = src.center\n panel = 0\n if show_model:\n ax[panel].text(*center[::-1], k, color=\"w\", ha=\"center\", va=\"center\")\n panel = 1\n if observation is not None:\n center_ = 
observation.get_pixel(model_frame.get_sky_coord(center))\n for panel in range(panel, panels):\n ax[panel].text(\n *center_[::-1], k, color=\"w\", ha=\"center\", va=\"center\"\n )\n\n fig.tight_layout()\n return fig\n\n\ndef get_extent(bbox):\n return [bbox.start[-1], bbox.stop[-1], bbox.start[-2], bbox.stop[-2]]\n\n\ndef show_sources(\n sources,\n observation=None,\n norm=None,\n channel_map=None,\n show_model=True,\n show_observed=False,\n show_rendered=False,\n show_spectrum=True,\n figsize=None,\n model_mask=None,\n add_markers=True,\n add_boxes=False,\n):\n \"\"\"Plot each source individually.\n The functions provides an more detailed inspection of every source in the list.\n Parameters\n ----------\n sources: list of source models\n observation: `~scarlet.Observation`\n norm: norm to compress image intensity to the range [0,255]\n channel_map: array_like\n Linear mapping with dimensions (3, channels)\n show_model: bool\n Whether the model is shown in the model frame\n show_observed: bool\n Whether the observation is shown\n show_rendered: bool\n Whether the model, rendered to match the observation, is shown\n show_spectrum: bool\n Whether source specturm is shown.\n For multi-component sources, spectra are shown separately.\n figsize: matplotlib figsize argument\n model_mask: array\n Mask used to hide pixels in the model only.\n add_markers: bool\n Whether or not to mark the centers of the sources\n with their source number.\n add_boxes: bool\n Whether source boxes are shown\n Returns\n -------\n matplotlib figure\n \"\"\"\n if show_observed or show_rendered:\n assert (\n observation is not None\n ), \"Provide matched observation to show observed frame\"\n\n panels = sum((show_model, show_observed, show_rendered, show_spectrum))\n if figsize is None:\n figsize = (panel_size * panels, panel_size * len(list(sources)))\n fig, ax = plt.subplots(len(list(sources)), panels, figsize=figsize, squeeze=False)\n\n marker_kwargs = {\"mew\": 1, \"ms\": 10}\n box_kwargs = 
{\"facecolor\": \"none\", \"edgecolor\": \"w\", \"lw\": 0.5}\n\n for k, src in enumerate(sources):\n\n model_frame = src.frame\n\n if hasattr(src, \"center\") and src.center is not None:\n center = np.array(src.center)[::-1]\n else:\n center = None\n\n if add_boxes:\n start, stop = src.bbox.start[-2:][::-1], src.bbox.stop[-2:][::-1]\n points = (start, (start[0], stop[1]), stop, (stop[0], start[1]))\n box_coords = [\n observation.get_pixel(model_frame.get_sky_coord(p)) for p in points\n ]\n\n # model in its bbox\n panel = 0\n model = src.get_model()\n\n if show_model:\n # Show the unrendered model in it's bbox\n extent = get_extent(src.bbox)\n ax[k][panel].imshow(\n img_to_rgb(model, norm=norm, channel_map=channel_map, mask=model_mask),\n extent=extent,\n origin=\"lower\",\n )\n ax[k][panel].set_title(\"Model Source {}\".format(k))\n if center is not None and add_markers:\n ax[k][panel].plot(*center, \"wx\", **marker_kwargs)\n panel += 1\n\n # model in observation frame\n if show_rendered:\n # Center and show the rendered model\n model_ = src.get_model(frame=model_frame)\n model_ = observation.render(model_)\n extent = get_extent(observation.bbox)\n ax[k][panel].imshow(\n img_to_rgb(model_, norm=norm, channel_map=channel_map),\n extent=extent,\n origin=\"lower\",\n )\n ax[k][panel].set_title(\"Model Source {} Rendered\".format(k))\n\n if center is not None and add_markers:\n center_ = observation.get_pixel(model_frame.get_sky_coord(center))\n ax[k][panel].plot(*center_, \"wx\", **marker_kwargs)\n if add_boxes:\n poly = Polygon(box_coords, closed=True, **box_kwargs)\n ax[k][panel].add_artist(poly)\n panel += 1\n\n if show_observed:\n # Center the observation on the source and display it\n _images = observation.data\n ax[k][panel].imshow(\n img_to_rgb(_images, norm=norm, channel_map=channel_map),\n extent=extent,\n origin=\"lower\",\n )\n ax[k][panel].set_title(\"Observation\".format(k))\n if center is not None and add_markers:\n center_ = 
observation.get_pixel(model_frame.get_sky_coord(center))\n ax[k][panel].plot(*center_, \"wx\", **marker_kwargs)\n if add_boxes:\n poly = Polygon(box_coords, closed=True, **box_kwargs)\n ax[k][panel].add_artist(poly)\n panel += 1\n\n if show_spectrum:\n # needs to be evaluated in the source box to prevent truncation\n if hasattr(src, \"__iter__\") and isinstance(src[0], Component):\n spectra = []\n for component in src:\n model_ = component.get_model()\n spectra.append(model_.sum(axis=(1, 2)))\n else:\n spectra = [model.sum(axis=(1, 2))]\n\n for spectrum in spectra:\n ax[k][panel].plot(spectrum)\n ax[k][panel].set_xticks(range(len(spectrum)))\n if hasattr(src.frame, \"channels\") and src.frame.channels is not None:\n ax[k][panel].set_xticklabels(src.frame.channels)\n ax[k][panel].set_title(\"Spectrum\")\n ax[k][panel].set_xlabel(\"Channel\")\n ax[k][panel].set_ylabel(\"Intensity\")\n\n fig.tight_layout()\n return fig\n"
] | [
[
"matplotlib.patches.Polygon",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"numpy.dstack",
"numpy.percentile",
"numpy.all",
"numpy.sinh",
"numpy.ceil",
"matplotlib.ticker.MaxNLocator",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jfrancis71/TensorFlowApps | [
"a9c61e2d5146c02715748221f51c656143b51b02"
] | [
"DownloadVGG_Faces.py"
] | [
"import os\nfrom PIL import Image\nimport urllib.request as ur\nimport urllib.request\nfrom io import BytesIO\nimport requests\nimport csv\nimport h5py\nimport numpy as np\nimport argparse\n\ndef retrieve_patch( rec ):\n response = requests.get( rec[1], timeout=10 )\n file = BytesIO( response.content )\n img = Image.open( file )\n ptch = img.crop( ( float(rec[2]),float(rec[3]),float(rec[4]), float(rec[5])) ).resize( (32,32) ).convert('L')\n return np.asarray( ptch, dtype=np.uint8 )\n\ndef retrieve_celeb( filename ):\n csvfile = open( filename, 'r')\n reader = csv.reader(csvfile, delimiter=' ')\n pts = []\n for row in reader:\n print( \"image = \", row[0] )\n if ( row[8] != '1' ):\n continue\n try:\n pt = retrieve_patch( row )\n pts.append( pt )\n except IOError as e:\n continue\n return pts\n\n#Parsing the command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-folder\",\n help=\"folder for the HDF5 file and subfolder files\")\nargs = parser.parse_args()\n\ncontent_list = os.listdir( os.path.join( args.folder, \"files\") )\n\ncelebs = []\nfor celeb in content_list[0:100]:\n print( \"Celeb\", celeb )\n pts = retrieve_celeb( os.path.join( args.folder, \"files\", celeb ) )\n celebs = celebs + pts\n\nfile = h5py.File( os.path.join( args.folder, \"dataset.hdf5\" ), 'w')\ndset = file.create_dataset(\"/patches\", data = celebs )\nfile.close()\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zaradana/Fast_BERT | [
"7ee96e99ba95468c29fe3542fe8071e0402ec0f6"
] | [
"0_fast_bert/prediction.py"
] | [
"import os\nimport torch\nfrom .data_cls import BertDataBunch\nfrom .data_ner import BertNERDataBunch\nfrom .learner_cls import BertLearner\nfrom .learner_ner import BertNERLearner\nimport time\n\nfrom transformers import AutoTokenizer\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\n\nclass BertClassificationPredictor(object):\n def __init__(\n self,\n model_path,\n label_path,\n multi_label=False,\n model_type=\"bert\",\n use_fast_tokenizer=True,\n do_lower_case=True,\n device=None,\n ):\n if device is None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n self.model_path = model_path\n self.label_path = label_path\n self.multi_label = multi_label\n self.model_type = model_type\n self.do_lower_case = do_lower_case\n self.device = device\n\n # Use auto-tokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.model_path, use_fast=use_fast_tokenizer\n )\n\n self.learner = self.get_learner()\n\n def get_learner(self):\n databunch = BertDataBunch(\n self.label_path,\n self.label_path,\n self.tokenizer,\n train_file=None,\n val_file=None,\n batch_size_per_gpu=32,\n max_seq_length=512,\n multi_gpu=False,\n multi_label=self.multi_label,\n model_type=self.model_type,\n no_cache=True,\n )\n\n learner = BertLearner.from_pretrained_model(\n databunch,\n self.model_path,\n metrics=[],\n device=self.device,\n logger=None,\n output_dir=None,\n warmup_steps=0,\n multi_gpu=False,\n is_fp16=False,\n multi_label=self.multi_label,\n logging_steps=0,\n )\n\n return learner\n\n def predict_batch(self, texts):\n return self.learner.predict_batch(texts)\n\n def predict(self, text):\n predictions = self.predict_batch([text])[0]\n return predictions\n\n\nclass BertNERPredictor(object):\n def __init__(\n self,\n model_path,\n label_path,\n model_type=\"bert\",\n use_fast_tokenizer=True,\n do_lower_case=True,\n 
device=None,\n ):\n if device is None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n self.model_path = model_path\n self.label_path = label_path\n self.model_type = model_type\n self.do_lower_case = do_lower_case\n self.device = device\n\n # Use auto-tokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.model_path, use_fast=use_fast_tokenizer\n )\n\n self.learner = self.get_learner()\n\n def get_learner(self):\n databunch = BertNERDataBunch(\n self.label_path,\n self.tokenizer,\n train_file=None,\n val_file=None,\n batch_size_per_gpu=32,\n max_seq_length=512,\n multi_gpu=False,\n model_type=self.model_type,\n no_cache=True,\n )\n\n learner = BertNERLearner.from_pretrained_model(\n databunch,\n self.model_path,\n device=self.device,\n logger=None,\n output_dir=None,\n warmup_steps=0,\n multi_gpu=False,\n is_fp16=False,\n logging_steps=0,\n )\n\n return learner\n\n def predict_batch(self, texts, group=True, exclude_entities=[\"O\"]):\n predictions = []\n\n for text in texts:\n pred = self.predict(text, group=group, exclude_entities=exclude_entities)\n if pred:\n predictions.append({\"text\": text, \"results\": pred})\n return predictions\n\n def predict(self, text, group=True, exclude_entities=[\"O\"]):\n predictions = self.learner.predict(\n text, group=group, exclude_entities=exclude_entities\n )\n return predictions\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.